Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/drm_agpsupport.c | 1
-rw-r--r--  drivers/gpu/drm/drm_bufs.c | 1
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 1
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c | 1
-rw-r--r--  drivers/gpu/drm/drm_debugfs.c | 1
-rw-r--r--  drivers/gpu/drm/drm_dp_i2c_helper.c | 1
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 1
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 12
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 3
-rw-r--r--  drivers/gpu/drm/drm_fops.c | 17
-rw-r--r--  drivers/gpu/drm/drm_hashtab.c | 1
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 2
-rw-r--r--  drivers/gpu/drm/drm_memory.c | 2
-rw-r--r--  drivers/gpu/drm/drm_pci.c | 1
-rw-r--r--  drivers/gpu/drm/drm_proc.c | 1
-rw-r--r--  drivers/gpu/drm/drm_scatter.c | 1
-rw-r--r--  drivers/gpu/drm/drm_stub.c | 5
-rw-r--r--  drivers/gpu/drm/drm_sysfs.c | 22
-rw-r--r--  drivers/gpu/drm/drm_vm.c | 1
-rw-r--r--  drivers/gpu/drm/i810/i810_dma.c | 1
-rw-r--r--  drivers/gpu/drm/i830/i830_dma.c | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 15
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 14
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 284
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_debug.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 32
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 17
-rw-r--r--  drivers/gpu/drm/i915/i915_opregion.c | 54
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 13
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 69
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 108
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 257
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 18
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c | 93
-rw-r--r--  drivers/gpu/drm/i915/intel_fb.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 87
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 82
-rw-r--r--  drivers/gpu/drm/i915/intel_modes.c | 23
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 732
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 96
-rw-r--r--  drivers/gpu/drm/nouveau/Makefile | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_acpi.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.c | 155
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.h | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 67
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_channel.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_debugfs.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dma.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dp.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h | 46
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_encoder.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c | 55
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_grctx.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_irq.c | 610
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mem.c | 124
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sgdma.c | 19
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_state.c | 20
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_crtc.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fbcon.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_fifo.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_graph.c | 21
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 26
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fb.c | 32
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fbcon.c | 15
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_gpio.c | 76
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_graph.c | 29
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_grctx.c | 32
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_instmem.c | 16
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_sor.c | 25
-rw-r--r--  drivers/gpu/drm/r128/r128_cce.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/Makefile | 2
-rw-r--r--  drivers/gpu/drm/radeon/atom.c | 105
-rw-r--r--  drivers/gpu/drm/radeon/atom.h | 8
-rw-r--r--  drivers/gpu/drm/radeon/atombios.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 100
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 12
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 49
-rw-r--r--  drivers/gpu/drm/radeon/r100_track.h | 3
-rw-r--r--  drivers/gpu/drm/radeon/r200.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/r300.c | 29
-rw-r--r--  drivers/gpu/drm/radeon/r300_cmdbuf.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/r420.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/r520.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 31
-rw-r--r--  drivers/gpu/drm/radeon/r600_audio.c | 54
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit_shaders.c | 35
-rw-r--r--  drivers/gpu/drm/radeon/r600_cp.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c | 70
-rw-r--r--  drivers/gpu/drm/radeon/r600_hdmi.c | 200
-rw-r--r--  drivers/gpu/drm/radeon/r600_reg.h | 10
-rw-r--r--  drivers/gpu/drm/radeon/r600d.h | 49
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 66
-rw-r--r--  drivers/gpu/drm/radeon/radeon_agp.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c | 772
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 545
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 472
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atpx_handler.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_bios.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c | 27
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 19
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cp.c | 17
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 291
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 76
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 12
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.h | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_encoders.c | 217
-rw-r--r--  drivers/gpu/drm/radeon/radeon_family.h | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fence.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_i2c.c | 153
-rw-r--r--  drivers/gpu/drm/radeon/radeon_irq_kms.c | 20
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_encoders.c | 70
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_tv.c | 29
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 14
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 46
-rw-r--r--  drivers/gpu/drm/radeon/radeon_reg.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/r300 | 2
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/r420 | 2
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/r600 | 75
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/rs600 | 2
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/rv515 | 3
-rw-r--r--  drivers/gpu/drm/radeon/rs400.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c | 35
-rw-r--r--  drivers/gpu/drm/radeon/rs600d.h | 53
-rw-r--r--  drivers/gpu/drm/radeon/rs690.c | 122
-rw-r--r--  drivers/gpu/drm/radeon/rs690d.h | 3
-rw-r--r--  drivers/gpu/drm/radeon/rv515.c | 46
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 32
-rw-r--r--  drivers/gpu/drm/ttm/ttm_agp_backend.c | 1
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 4
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 1
-rw-r--r--  drivers/gpu/drm/ttm/ttm_memory.c | 19
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c | 24
-rw-r--r--  drivers/gpu/drm/via/via_dmablit.c | 1
-rw-r--r--  drivers/gpu/drm/via/via_video.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/Kconfig | 2
151 files changed, 4755 insertions, 2903 deletions
diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
index d68888fe3df9..ba38e0147220 100644
--- a/drivers/gpu/drm/drm_agpsupport.c
+++ b/drivers/gpu/drm/drm_agpsupport.c
@@ -33,6 +33,7 @@
 
 #include "drmP.h"
 #include <linux/module.h>
+#include <linux/slab.h>
 
 #if __OS_HAS_AGP
 
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 8417cc4c43f1..f7ba82ebf65a 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -34,6 +34,7 @@
  */
 
 #include <linux/vmalloc.h>
+#include <linux/slab.h>
 #include <linux/log2.h>
 #include <asm/shmparam.h>
 #include "drmP.h"
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index d91fb8c0b7b3..61b9bcfdf040 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -30,6 +30,7 @@
  *	Jesse Barnes <jesse.barnes@intel.com>
  */
 #include <linux/list.h>
+#include <linux/slab.h>
 #include "drm.h"
 #include "drmP.h"
 #include "drm_crtc.h"
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index f2aaf39be398..51103aa469f8 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -104,6 +104,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
 	if (connector->status == connector_status_disconnected) {
 		DRM_DEBUG_KMS("%s is disconnected\n",
			drm_get_connector_name(connector));
+		drm_mode_connector_update_edid_property(connector, NULL);
 		goto prune;
 	}
 
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 9903f270e440..677b275fa721 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -32,6 +32,7 @@
 
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
+#include <linux/slab.h>
 #include "drmP.h"
 
 #if defined(CONFIG_DEBUG_FS)
diff --git a/drivers/gpu/drm/drm_dp_i2c_helper.c b/drivers/gpu/drm/drm_dp_i2c_helper.c
index 548887c8506f..f7eba0a0973a 100644
--- a/drivers/gpu/drm/drm_dp_i2c_helper.c
+++ b/drivers/gpu/drm/drm_dp_i2c_helper.c
@@ -23,7 +23,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/delay.h>
-#include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/sched.h>
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index f3c58e2bd75c..4a66201edaec 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -47,6 +47,7 @@
  */
 
 #include <linux/debugfs.h>
+#include <linux/slab.h>
 #include "drmP.h"
 #include "drm_core.h"
 
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index f97e7c42ac8e..18f41d7061f0 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -27,6 +27,7 @@
  * DEALINGS IN THE SOFTWARE.
  */
 #include <linux/kernel.h>
+#include <linux/slab.h>
 #include <linux/i2c.h>
 #include <linux/i2c-algo-bit.h>
 #include "drmP.h"
@@ -84,6 +85,8 @@ static struct edid_quirk {
 
	/* Envision Peripherals, Inc. EN-7100e */
	{ "EPI", 59264, EDID_QUIRK_135_CLOCK_TOO_HIGH },
+	/* Envision EN2028 */
+	{ "EPI", 8232, EDID_QUIRK_PREFER_LARGE_60 },
 
	/* Funai Electronics PM36B */
	{ "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 |
@@ -707,15 +710,6 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
	mode->vsync_end = mode->vsync_start + vsync_pulse_width;
	mode->vtotal = mode->vdisplay + vblank;
 
-	/* perform the basic check for the detailed timing */
-	if (mode->hsync_end > mode->htotal ||
-		mode->vsync_end > mode->vtotal) {
-		drm_mode_destroy(dev, mode);
-		DRM_DEBUG_KMS("Incorrect detailed timing. "
-				"Sync is beyond the blank.\n");
-		return NULL;
-	}
-
	/* Some EDIDs have bogus h/vtotal values */
	if (mode->hsync_end > mode->htotal)
		mode->htotal = mode->hsync_end + 1;
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 50549703584f..288ea2f32772 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -29,6 +29,7 @@
  */
 #include <linux/kernel.h>
 #include <linux/sysrq.h>
+#include <linux/slab.h>
 #include <linux/fb.h>
 #include "drmP.h"
 #include "drm_crtc.h"
@@ -283,6 +284,8 @@ static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = {
	.help_msg = "force-fb(V)",
	.action_msg = "Restore framebuffer console",
 };
+#else
+static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { };
 #endif
 
 static void drm_fb_helper_on(struct fb_info *info)
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 08d14df3bb42..9d532d7fdf59 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -36,6 +36,7 @@
 
 #include "drmP.h"
 #include <linux/poll.h>
+#include <linux/slab.h>
 #include <linux/smp_lock.h>
 
 static int drm_open_helper(struct inode *inode, struct file *filp,
@@ -140,14 +141,16 @@ int drm_open(struct inode *inode, struct file *filp)
		spin_unlock(&dev->count_lock);
	}
 out:
-	mutex_lock(&dev->struct_mutex);
-	if (minor->type == DRM_MINOR_LEGACY) {
-		BUG_ON((dev->dev_mapping != NULL) &&
-		       (dev->dev_mapping != inode->i_mapping));
-		if (dev->dev_mapping == NULL)
-			dev->dev_mapping = inode->i_mapping;
+	if (!retcode) {
+		mutex_lock(&dev->struct_mutex);
+		if (minor->type == DRM_MINOR_LEGACY) {
+			if (dev->dev_mapping == NULL)
+				dev->dev_mapping = inode->i_mapping;
+			else if (dev->dev_mapping != inode->i_mapping)
+				retcode = -ENODEV;
+		}
+		mutex_unlock(&dev->struct_mutex);
	}
-	mutex_unlock(&dev->struct_mutex);
 
	return retcode;
 }
diff --git a/drivers/gpu/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c
index f36b21c5b2e1..a93d7b4ddaa6 100644
--- a/drivers/gpu/drm/drm_hashtab.c
+++ b/drivers/gpu/drm/drm_hashtab.c
@@ -35,6 +35,7 @@
 #include "drmP.h"
 #include "drm_hashtab.h"
 #include <linux/hash.h>
+#include <linux/slab.h>
 
 int drm_ht_create(struct drm_open_hash *ht, unsigned int order)
 {
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index b98384dbd9a7..a263b7070fc6 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -36,6 +36,7 @@
 #include "drmP.h"
 
 #include <linux/interrupt.h>	/* For task queue support */
+#include <linux/slab.h>
 
 #include <linux/vgaarb.h>
 /**
@@ -475,6 +476,7 @@ void drm_vblank_off(struct drm_device *dev, int crtc)
	unsigned long irqflags;
 
	spin_lock_irqsave(&dev->vbl_lock, irqflags);
+	dev->driver->disable_vblank(dev, crtc);
	DRM_WAKEUP(&dev->vbl_queue[crtc]);
	dev->vblank_enabled[crtc] = 0;
	dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
index e4865f99989c..7732268eced2 100644
--- a/drivers/gpu/drm/drm_memory.c
+++ b/drivers/gpu/drm/drm_memory.c
@@ -77,7 +77,7 @@ static void *agp_remap(unsigned long offset, unsigned long size,
		    && (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >=
		    (offset + size))
			break;
-	if (!agpmem)
+	if (&agpmem->head == &dev->agp->memory)
		return NULL;
 
	/*
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index e68ebf92fa2a..2ea9ad4a8d69 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -37,6 +37,7 @@
  */
 
 #include <linux/pci.h>
+#include <linux/slab.h>
 #include <linux/dma-mapping.h>
 #include "drmP.h"
 
diff --git a/drivers/gpu/drm/drm_proc.c b/drivers/gpu/drm/drm_proc.c
index d379c4f2892f..a9ba6b69ad35 100644
--- a/drivers/gpu/drm/drm_proc.c
+++ b/drivers/gpu/drm/drm_proc.c
@@ -38,6 +38,7 @@
  */
 
 #include <linux/seq_file.h>
+#include <linux/slab.h>
 #include "drmP.h"
 
 /***************************************************
diff --git a/drivers/gpu/drm/drm_scatter.c b/drivers/gpu/drm/drm_scatter.c
index c7823c863d4f..9034c4c6100d 100644
--- a/drivers/gpu/drm/drm_scatter.c
+++ b/drivers/gpu/drm/drm_scatter.c
@@ -32,6 +32,7 @@
  */
 
 #include <linux/vmalloc.h>
+#include <linux/slab.h>
 #include "drmP.h"
 
 #define DEBUG_SCATTER 0
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index ad73e141afdb..a0c365f2e521 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -33,6 +33,7 @@
 
 #include <linux/module.h>
 #include <linux/moduleparam.h>
+#include <linux/slab.h>
 #include "drmP.h"
 #include "drm_core.h"
 
@@ -515,8 +516,6 @@ void drm_put_dev(struct drm_device *dev)
	}
	driver = dev->driver;
 
-	drm_vblank_cleanup(dev);
-
	drm_lastclose(dev);
 
	if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) &&
@@ -536,6 +535,8 @@ void drm_put_dev(struct drm_device *dev)
		dev->agp = NULL;
	}
 
+	drm_vblank_cleanup(dev);
+
	list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
		drm_rmmap(dev, r_list->map);
	drm_ht_remove(&dev->map_hash);
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 014ce24761b9..25bbd30ed7af 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -14,6 +14,7 @@
 
 #include <linux/device.h>
 #include <linux/kdev_t.h>
+#include <linux/gfp.h>
 #include <linux/err.h>
 
 #include "drm_sysfs.h"
@@ -353,7 +354,10 @@ static struct bin_attribute edid_attr = {
 int drm_sysfs_connector_add(struct drm_connector *connector)
 {
	struct drm_device *dev = connector->dev;
-	int ret = 0, i, j;
+	int attr_cnt = 0;
+	int opt_cnt = 0;
+	int i;
+	int ret = 0;
 
	/* We shouldn't get called more than once for the same connector */
	BUG_ON(device_is_registered(&connector->kdev));
@@ -376,8 +380,8 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
 
	/* Standard attributes */
 
-	for (i = 0; i < ARRAY_SIZE(connector_attrs); i++) {
-		ret = device_create_file(&connector->kdev, &connector_attrs[i]);
+	for (attr_cnt = 0; attr_cnt < ARRAY_SIZE(connector_attrs); attr_cnt++) {
+		ret = device_create_file(&connector->kdev, &connector_attrs[attr_cnt]);
		if (ret)
			goto err_out_files;
	}
@@ -393,8 +397,8 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
	case DRM_MODE_CONNECTOR_SVIDEO:
	case DRM_MODE_CONNECTOR_Component:
	case DRM_MODE_CONNECTOR_TV:
-		for (i = 0; i < ARRAY_SIZE(connector_attrs_opt1); i++) {
-			ret = device_create_file(&connector->kdev, &connector_attrs_opt1[i]);
+		for (opt_cnt = 0; opt_cnt < ARRAY_SIZE(connector_attrs_opt1); opt_cnt++) {
+			ret = device_create_file(&connector->kdev, &connector_attrs_opt1[opt_cnt]);
			if (ret)
				goto err_out_files;
		}
@@ -413,10 +417,10 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
	return 0;
 
 err_out_files:
-	if (i > 0)
-		for (j = 0; j < i; j++)
-			device_remove_file(&connector->kdev,
-					   &connector_attrs[i]);
+	for (i = 0; i < opt_cnt; i++)
+		device_remove_file(&connector->kdev, &connector_attrs_opt1[i]);
+	for (i = 0; i < attr_cnt; i++)
+		device_remove_file(&connector->kdev, &connector_attrs[i]);
	device_unregister(&connector->kdev);
 
 out:
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 4ac900f4647f..c3b13fb41d0c 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -36,6 +36,7 @@
 #include "drmP.h"
 #if defined(__ia64__)
 #include <linux/efi.h>
+#include <linux/slab.h>
 #endif
 
 static void drm_vm_open(struct vm_area_struct *vma);
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index de32d22a8c39..997d91707ad2 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -36,6 +36,7 @@
 #include "i810_drv.h"
 #include <linux/interrupt.h>	/* For task queue support */
 #include <linux/delay.h>
+#include <linux/slab.h>
 #include <linux/pagemap.h>
 
 #define I810_BUF_FREE		2
diff --git a/drivers/gpu/drm/i830/i830_dma.c b/drivers/gpu/drm/i830/i830_dma.c
index 06bd732e6463..65759a9a85c8 100644
--- a/drivers/gpu/drm/i830/i830_dma.c
+++ b/drivers/gpu/drm/i830/i830_dma.c
@@ -38,6 +38,7 @@
 #include <linux/interrupt.h>	/* For task queue support */
 #include <linux/pagemap.h>
 #include <linux/delay.h>
+#include <linux/slab.h>
 #include <asm/uaccess.h>
 
 #define I830_BUF_FREE		2
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 1376dfe44c95..a0b8447b06e7 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -28,6 +28,7 @@
 
 #include <linux/seq_file.h>
 #include <linux/debugfs.h>
+#include <linux/slab.h>
 #include "drmP.h"
 #include "drm.h"
 #include "i915_drm.h"
@@ -225,7 +226,7 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
	} else {
		struct drm_i915_gem_object *obj_priv;
 
-		obj_priv = obj->driver_private;
+		obj_priv = to_intel_bo(obj);
		seq_printf(m, "Fenced object[%2d] = %p: %s "
			   "%08x %08zx %08x %s %08x %08x %d",
			   i, obj, get_pin_flag(obj_priv),
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index a9f8589490cf..c3cfafcbfe7d 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -38,6 +38,7 @@
 #include <linux/acpi.h>
 #include <linux/pnp.h>
 #include <linux/vga_switcheroo.h>
+#include <linux/slab.h>
 
 /* Really want an OS-independent resettable timer.  Would like to have
  * this loop run for (eg) 3 sec, but have the timer reset every time
@@ -1356,6 +1357,8 @@ static void i915_setup_compression(struct drm_device *dev, int size)
 
	dev_priv->cfb_size = size;
 
+	dev_priv->compressed_fb = compressed_fb;
+
	if (IS_GM45(dev)) {
		g4x_disable_fbc(dev);
		I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
@@ -1363,12 +1366,22 @@ static void i915_setup_compression(struct drm_device *dev, int size)
		i8xx_disable_fbc(dev);
		I915_WRITE(FBC_CFB_BASE, cfb_base);
		I915_WRITE(FBC_LL_BASE, ll_base);
+		dev_priv->compressed_llb = compressed_llb;
	}
 
	DRM_DEBUG("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base,
		  ll_base, size >> 20);
 }
 
+static void i915_cleanup_compression(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	drm_mm_put_block(dev_priv->compressed_fb);
+	if (!IS_GM45(dev))
+		drm_mm_put_block(dev_priv->compressed_llb);
+}
+
 /* true = enable decode, false = disable decoder */
 static unsigned int i915_vga_set_decode(void *cookie, bool state)
 {
@@ -1786,6 +1799,8 @@ int i915_driver_unload(struct drm_device *dev)
		mutex_lock(&dev->struct_mutex);
		i915_gem_cleanup_ringbuffer(dev);
		mutex_unlock(&dev->struct_mutex);
+		if (I915_HAS_FBC(dev) && i915_powersave)
+			i915_cleanup_compression(dev);
		drm_mm_takedown(&dev_priv->vram);
		i915_gem_lastclose(dev);
 
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 4b26919abdb2..cc03537bb883 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -69,7 +69,8 @@ const static struct intel_device_info intel_845g_info = {
 };
 
 const static struct intel_device_info intel_i85x_info = {
-	.is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
+	.is_i8xx = 1, .is_i85x = 1, .is_mobile = 1,
+	.cursor_needs_physical = 1,
 };
 
 const static struct intel_device_info intel_i865g_info = {
@@ -80,14 +81,14 @@ const static struct intel_device_info intel_i915g_info = {
	.is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1,
 };
 const static struct intel_device_info intel_i915gm_info = {
-	.is_i9xx = 1, .is_mobile = 1, .has_fbc = 1,
+	.is_i9xx = 1, .is_mobile = 1,
	.cursor_needs_physical = 1,
 };
 const static struct intel_device_info intel_i945g_info = {
	.is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1,
 };
 const static struct intel_device_info intel_i945gm_info = {
-	.is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1, .has_fbc = 1,
+	.is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1,
	.has_hotplug = 1, .cursor_needs_physical = 1,
 };
 
@@ -151,7 +152,7 @@ const static struct pci_device_id pciidlist[] = {
	INTEL_VGA_DEVICE(0x3577, &intel_i830_info),
	INTEL_VGA_DEVICE(0x2562, &intel_845g_info),
	INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),
-	INTEL_VGA_DEVICE(0x35e8, &intel_i85x_info),
+	INTEL_VGA_DEVICE(0x358e, &intel_i85x_info),
	INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),
	INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),
	INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),
@@ -361,7 +362,7 @@ int i965_reset(struct drm_device *dev, u8 flags)
	    !dev_priv->mm.suspended) {
		drm_i915_ring_buffer_t *ring = &dev_priv->ring;
		struct drm_gem_object *obj = ring->ring_obj;
-		struct drm_i915_gem_object *obj_priv = obj->driver_private;
+		struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
		dev_priv->mm.suspended = 0;
 
		/* Stop the ring if it's running. */
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index aba8260fbc5e..6e4790065d9e 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -195,6 +195,7 @@ struct intel_overlay;
 struct intel_device_info {
	u8 is_mobile : 1;
	u8 is_i8xx : 1;
+	u8 is_i85x : 1;
	u8 is_i915g : 1;
	u8 is_i9xx : 1;
	u8 is_i945gm : 1;
@@ -235,11 +236,14 @@ typedef struct drm_i915_private {
 
	drm_dma_handle_t *status_page_dmah;
	void *hw_status_page;
+	void *seqno_page;
	dma_addr_t dma_status_page;
	uint32_t counter;
	unsigned int status_gfx_addr;
+	unsigned int seqno_gfx_addr;
	drm_local_map_t hws_map;
	struct drm_gem_object *hws_obj;
+	struct drm_gem_object *seqno_obj;
	struct drm_gem_object *pwrctx;
 
	struct resource mch_res;
@@ -611,6 +615,8 @@ typedef struct drm_i915_private {
	/* Reclocking support */
	bool render_reclock_avail;
	bool lvds_downclock_avail;
+	/* indicate whether the LVDS EDID is OK */
+	bool lvds_edid_good;
	/* indicates the reduced downclock for LVDS*/
	int lvds_downclock;
	struct work_struct idle_work;
@@ -628,6 +634,9 @@ typedef struct drm_i915_private {
	u8 max_delay;
 
	enum no_fbc_reason no_fbc_reason;
+
+	struct drm_mm_node *compressed_fb;
+	struct drm_mm_node *compressed_llb;
 } drm_i915_private_t;
 
 /** driver private structure attached to each drm_gem_object */
@@ -731,6 +740,8 @@ struct drm_i915_gem_object {
	atomic_t pending_flip;
 };
 
+#define to_intel_bo(x) ((struct drm_i915_gem_object *) (x)->driver_private)
+
 /**
  * Request queue structure.
  *
@@ -1066,7 +1077,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 
 #define IS_I830(dev)		((dev)->pci_device == 0x3577)
 #define IS_845G(dev)		((dev)->pci_device == 0x2562)
-#define IS_I85X(dev)		((dev)->pci_device == 0x3582)
+#define IS_I85X(dev)		(INTEL_INFO(dev)->is_i85x)
 #define IS_I865G(dev)		((dev)->pci_device == 0x2572)
 #define IS_GEN2(dev)		(INTEL_INFO(dev)->is_i8xx)
 #define IS_I915G(dev)		(INTEL_INFO(dev)->is_i915g)
@@ -1131,6 +1142,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 
 #define HAS_PCH_SPLIT(dev) (IS_IRONLAKE(dev) || \
			    IS_GEN6(dev))
+#define HAS_PIPE_CONTROL(dev) (IS_IRONLAKE(dev) || IS_GEN6(dev))
 
 #define PRIMARY_RINGBUFFER_SIZE         (128*1024)
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 933e865a8929..ef3d91dda71a 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -31,6 +31,7 @@
 #include "i915_drv.h"
 #include "i915_trace.h"
 #include "intel_drv.h"
+#include <linux/slab.h>
 #include <linux/swap.h>
 #include <linux/pci.h>
 
@@ -162,7 +163,7 @@ fast_shmem_read(struct page **pages,
 static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
 {
	drm_i915_private_t *dev_priv = obj->dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
		obj_priv->tiling_mode != I915_TILING_NONE;
@@ -263,7 +264,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
			  struct drm_i915_gem_pread *args,
			  struct drm_file *file_priv)
 {
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
@@ -284,7 +285,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
	if (ret != 0)
		goto fail_put_pages;
 
-	obj_priv = obj->driver_private;
+	obj_priv = to_intel_bo(obj);
	offset = args->offset;
 
	while (remain > 0) {
@@ -353,7 +354,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
			  struct drm_i915_gem_pread *args,
			  struct drm_file *file_priv)
 {
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	struct mm_struct *mm = current->mm;
	struct page **user_pages;
	ssize_t remain;
@@ -402,7 +403,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
	if (ret != 0)
		goto fail_put_pages;
 
-	obj_priv = obj->driver_private;
+	obj_priv = to_intel_bo(obj);
	offset = args->offset;
 
	while (remain > 0) {
@@ -478,7 +479,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;
-	obj_priv = obj->driver_private;
+	obj_priv = to_intel_bo(obj);
 
	/* Bounds check source.
	 *
@@ -580,7 +581,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file_priv)
 {
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t offset, page_base;
@@ -604,7 +605,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
	if (ret)
		goto fail;
 
-	obj_priv = obj->driver_private;
+	obj_priv = to_intel_bo(obj);
	offset = obj_priv->gtt_offset + args->offset;
 
	while (remain > 0) {
@@ -654,7 +655,7 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file_priv)
 {
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	drm_i915_private_t *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t gtt_page_base, offset;
@@ -698,7 +699,7 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
	if (ret)
		goto out_unpin_object;
 
-	obj_priv = obj->driver_private;
+	obj_priv = to_intel_bo(obj);
	offset = obj_priv->gtt_offset + args->offset;
 
	while (remain > 0) {
@@ -760,7 +761,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
			   struct drm_i915_gem_pwrite *args,
			   struct drm_file *file_priv)
 {
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
@@ -780,7 +781,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
	if (ret != 0)
		goto fail_put_pages;
 
-	obj_priv = obj->driver_private;
+	obj_priv = to_intel_bo(obj);
	offset = args->offset;
	obj_priv->dirty = 1;
 
@@ -828,7 +829,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
			   struct drm_i915_gem_pwrite *args,
			   struct drm_file *file_priv)
 {
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	struct mm_struct *mm = current->mm;
	struct page **user_pages;
	ssize_t remain;
@@ -876,7 +877,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
	if (ret != 0)
		goto fail_put_pages;
 
-	obj_priv = obj->driver_private;
+	obj_priv = to_intel_bo(obj);
	offset = args->offset;
	obj_priv->dirty = 1;
 
@@ -951,7 +952,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;
-	obj_priv = obj->driver_private;
+	obj_priv = to_intel_bo(obj);
 
	/* Bounds check destination.
	 *
@@ -1033,7 +1034,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -EBADF;
-	obj_priv = obj->driver_private;
+	obj_priv = to_intel_bo(obj);
 
	mutex_lock(&dev->struct_mutex);
 
@@ -1095,7 +1096,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
		 DRM_INFO("%s: sw_finish %d (%p %zd)\n",
			  __func__, args->handle, obj, obj->size);
 #endif
-	obj_priv = obj->driver_private;
+	obj_priv = to_intel_bo(obj);
 
	/* Pinned buffers may be scanout, so flush the cache */
	if (obj_priv->pin_count)
@@ -1166,7 +1167,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	pgoff_t page_offset;
	unsigned long pfn;
	int ret = 0;
@@ -1233,7 +1234,7 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj)
 {
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	struct drm_map_list *list;
	struct drm_local_map *map;
	int ret = 0;
@@ -1304,7 +1305,7 @@ void
 i915_gem_release_mmap(struct drm_gem_object *obj)
 {
	struct drm_device *dev = obj->dev;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
	if (dev->dev_mapping)
		unmap_mapping_range(dev->dev_mapping,
@@ -1315,7 +1316,7 @@ static void
 i915_gem_free_mmap_offset(struct drm_gem_object *obj)
 {
	struct drm_device *dev = obj->dev;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_map_list *list;
 
@@ -1346,7 +1347,7 @@ static uint32_t
 i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
 {
	struct drm_device *dev = obj->dev;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int start, i;
 
	/*
@@ -1405,7 +1406,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
 
	mutex_lock(&dev->struct_mutex);
 
-	obj_priv = obj->driver_private;
+	obj_priv = to_intel_bo(obj);
 
	if (obj_priv->madv != I915_MADV_WILLNEED) {
		DRM_ERROR("Attempting to mmap a purgeable buffer\n");
@@ -1449,7 +1450,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
 void
 i915_gem_object_put_pages(struct drm_gem_object *obj)
 {
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int page_count = obj->size / PAGE_SIZE;
	int i;
 
@@ -1485,7 +1486,7 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
 {
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
	/* Add a reference if we're newly entering the active list. */
	if (!obj_priv->active) {
@@ -1505,7 +1506,7 @@ i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
 {
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
	BUG_ON(!obj_priv->active);
	list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
@@ -1516,7 +1517,7 @@ i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
 static void
 i915_gem_object_truncate(struct drm_gem_object *obj)
 {
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	struct inode *inode;
 
	inode = obj->filp->f_path.dentry->d_inode;
@@ -1537,7 +1538,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
 {
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
	i915_verify_inactive(dev, __FILE__, __LINE__);
	if (obj_priv->pin_count != 0)
@@ -1587,6 +1588,13 @@ i915_gem_process_flushing_list(struct drm_device *dev,
	}
 }
 
+#define PIPE_CONTROL_FLUSH(addr)					\
+	OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |		\
+		 PIPE_CONTROL_DEPTH_STALL);				\
+	OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT);			\
+	OUT_RING(0);							\
+	OUT_RING(0);							\
+
 /**
  * Creates a new sequence number, emitting a write of it to the status page
  * plus an interrupt, which will trigger i915_user_interrupt_handler.
@@ -1621,13 +1629,47 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
	if (dev_priv->mm.next_gem_seqno == 0)
		dev_priv->mm.next_gem_seqno++;
 
-	BEGIN_LP_RING(4);
-	OUT_RING(MI_STORE_DWORD_INDEX);
-	OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	OUT_RING(seqno);
+	if (HAS_PIPE_CONTROL(dev)) {
+		u32 scratch_addr = dev_priv->seqno_gfx_addr + 128;
 
-	OUT_RING(MI_USER_INTERRUPT);
-	ADVANCE_LP_RING();
+		/*
+		 * Workaround qword write incoherence by flushing the
+		 * PIPE_NOTIFY buffers out to memory before requesting
+		 * an interrupt.
+		 */
+		BEGIN_LP_RING(32);
+		OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
+			 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
+		OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
+		OUT_RING(seqno);
+		OUT_RING(0);
+		PIPE_CONTROL_FLUSH(scratch_addr);
+		scratch_addr += 128; /* write to separate cachelines */
+		PIPE_CONTROL_FLUSH(scratch_addr);
+		scratch_addr += 128;
+		PIPE_CONTROL_FLUSH(scratch_addr);
+		scratch_addr += 128;
+		PIPE_CONTROL_FLUSH(scratch_addr);
+		scratch_addr += 128;
+		PIPE_CONTROL_FLUSH(scratch_addr);
+		scratch_addr += 128;
+		PIPE_CONTROL_FLUSH(scratch_addr);
+		OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
+			 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
+			 PIPE_CONTROL_NOTIFY);
+		OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
+		OUT_RING(seqno);
+		OUT_RING(0);
+		ADVANCE_LP_RING();
+	} else {
+		BEGIN_LP_RING(4);
+		OUT_RING(MI_STORE_DWORD_INDEX);
+		OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+		OUT_RING(seqno);
+
+		OUT_RING(MI_USER_INTERRUPT);
+		ADVANCE_LP_RING();
+	}
 
	DRM_DEBUG_DRIVER("%d\n", seqno);
 
@@ -1751,7 +1793,10 @@ i915_get_gem_seqno(struct drm_device *dev)
 {
	drm_i915_private_t *dev_priv = dev->dev_private;
 
-	return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
+	if (HAS_PIPE_CONTROL(dev))
+		return ((volatile u32 *)(dev_priv->seqno_page))[0];
+	else
+		return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
 }
 
 /**
@@ -1964,7 +2009,7 @@ static int
 i915_gem_object_wait_rendering(struct drm_gem_object *obj)
 {
	struct drm_device *dev = obj->dev;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int ret;
 
	/* This function only exists to support waiting for existing rendering,
@@ -1996,7 +2041,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 {
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int ret = 0;
 
 #if WATCH_BUF
@@ -2172,7 +2217,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
 #if WATCH_LRU
			DRM_INFO("%s: evicting %p\n", __func__, obj);
 #endif
-			obj_priv = obj->driver_private;
+			obj_priv = to_intel_bo(obj);
			BUG_ON(obj_priv->pin_count != 0);
			BUG_ON(obj_priv->active);
 
@@ -2243,7 +2288,7 @@ int
 i915_gem_object_get_pages(struct drm_gem_object *obj,
			  gfp_t gfpmask)
 {
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int page_count, i;
	struct address_space *mapping;
	struct inode *inode;
@@ -2296,7 +2341,7 @@ static void sandybridge_write_fence_reg(struct drm_i915_fence_reg *reg)
	struct drm_gem_object *obj = reg->obj;
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int regnum = obj_priv->fence_reg;
	uint64_t val;
 
@@ -2318,7 +2363,7 @@ static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
	struct drm_gem_object *obj = reg->obj;
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int regnum = obj_priv->fence_reg;
	uint64_t val;
 
@@ -2338,7 +2383,7 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
	struct drm_gem_object *obj = reg->obj;
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int regnum = obj_priv->fence_reg;
	int tile_width;
	uint32_t fence_reg, val;
@@ -2361,6 +2406,12 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
	pitch_val = obj_priv->stride / tile_width;
	pitch_val = ffs(pitch_val) - 1;
 
+	if (obj_priv->tiling_mode == I915_TILING_Y &&
+	    HAS_128_BYTE_Y_TILING(dev))
+		WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
+	else
+		WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL);
+
	val = obj_priv->gtt_offset;
	if (obj_priv->tiling_mode == I915_TILING_Y)
		val |= 1 << I830_FENCE_TILING_Y_SHIFT;
@@ -2380,7 +2431,7 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
	struct drm_gem_object *obj = reg->obj;
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	int regnum = obj_priv->fence_reg;
	uint32_t val;
	uint32_t pitch_val;
@@ -2424,7 +2475,7 @@ static int i915_find_fence_reg(struct drm_device *dev)
		if (!reg->obj)
			return i;
 
-		obj_priv = reg->obj->driver_private;
+		obj_priv = to_intel_bo(reg->obj);
		if (!obj_priv->pin_count)
			avail++;
	}
@@ -2479,7 +2530,7 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
 {
	struct drm_device *dev = obj->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	struct drm_i915_fence_reg *reg = NULL;
	int ret;
 
@@ -2546,7 +2597,7 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
 {
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
	if (IS_GEN6(dev)) {
		I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
@@ -2582,7 +2633,7 @@ int
 i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
 {
	struct drm_device *dev = obj->dev;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
	if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
		return 0;
@@ -2620,7 +2671,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 {
	struct drm_device *dev = obj->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
	struct drm_mm_node *free_space;
	gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
	int ret;
@@ -2727,7 +2778,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2727void 2778void
2728i915_gem_clflush_object(struct drm_gem_object *obj) 2779i915_gem_clflush_object(struct drm_gem_object *obj)
2729{ 2780{
2730 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2781 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2731 2782
2732 /* If we don't have a page list set up, then we're not pinned 2783 /* If we don't have a page list set up, then we're not pinned
2733 * to GPU, and we can ignore the cache flush because it'll happen 2784 * to GPU, and we can ignore the cache flush because it'll happen
@@ -2828,7 +2879,7 @@ i915_gem_object_flush_write_domain(struct drm_gem_object *obj)
2828int 2879int
2829i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) 2880i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
2830{ 2881{
2831 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2882 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2832 uint32_t old_write_domain, old_read_domains; 2883 uint32_t old_write_domain, old_read_domains;
2833 int ret; 2884 int ret;
2834 2885
@@ -2878,7 +2929,7 @@ int
2878i915_gem_object_set_to_display_plane(struct drm_gem_object *obj) 2929i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
2879{ 2930{
2880 struct drm_device *dev = obj->dev; 2931 struct drm_device *dev = obj->dev;
2881 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2932 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2882 uint32_t old_write_domain, old_read_domains; 2933 uint32_t old_write_domain, old_read_domains;
2883 int ret; 2934 int ret;
2884 2935
@@ -3091,7 +3142,7 @@ static void
3091i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) 3142i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
3092{ 3143{
3093 struct drm_device *dev = obj->dev; 3144 struct drm_device *dev = obj->dev;
3094 struct drm_i915_gem_object *obj_priv = obj->driver_private; 3145 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
3095 uint32_t invalidate_domains = 0; 3146 uint32_t invalidate_domains = 0;
3096 uint32_t flush_domains = 0; 3147 uint32_t flush_domains = 0;
3097 uint32_t old_read_domains; 3148 uint32_t old_read_domains;
@@ -3176,7 +3227,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
3176static void 3227static void
3177i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj) 3228i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
3178{ 3229{
3179 struct drm_i915_gem_object *obj_priv = obj->driver_private; 3230 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
3180 3231
3181 if (!obj_priv->page_cpu_valid) 3232 if (!obj_priv->page_cpu_valid)
3182 return; 3233 return;
@@ -3216,7 +3267,7 @@ static int
3216i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, 3267i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
3217 uint64_t offset, uint64_t size) 3268 uint64_t offset, uint64_t size)
3218{ 3269{
3219 struct drm_i915_gem_object *obj_priv = obj->driver_private; 3270 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
3220 uint32_t old_read_domains; 3271 uint32_t old_read_domains;
3221 int i, ret; 3272 int i, ret;
3222 3273
@@ -3285,7 +3336,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
3285{ 3336{
3286 struct drm_device *dev = obj->dev; 3337 struct drm_device *dev = obj->dev;
3287 drm_i915_private_t *dev_priv = dev->dev_private; 3338 drm_i915_private_t *dev_priv = dev->dev_private;
3288 struct drm_i915_gem_object *obj_priv = obj->driver_private; 3339 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
3289 int i, ret; 3340 int i, ret;
3290 void __iomem *reloc_page; 3341 void __iomem *reloc_page;
3291 bool need_fence; 3342 bool need_fence;
@@ -3336,7 +3387,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
3336 i915_gem_object_unpin(obj); 3387 i915_gem_object_unpin(obj);
3337 return -EBADF; 3388 return -EBADF;
3338 } 3389 }
3339 target_obj_priv = target_obj->driver_private; 3390 target_obj_priv = to_intel_bo(target_obj);
3340 3391
3341#if WATCH_RELOC 3392#if WATCH_RELOC
3342 DRM_INFO("%s: obj %p offset %08x target %d " 3393 DRM_INFO("%s: obj %p offset %08x target %d "
@@ -3688,7 +3739,7 @@ i915_gem_wait_for_pending_flip(struct drm_device *dev,
3688 prepare_to_wait(&dev_priv->pending_flip_queue, 3739 prepare_to_wait(&dev_priv->pending_flip_queue,
3689 &wait, TASK_INTERRUPTIBLE); 3740 &wait, TASK_INTERRUPTIBLE);
3690 for (i = 0; i < count; i++) { 3741 for (i = 0; i < count; i++) {
3691 obj_priv = object_list[i]->driver_private; 3742 obj_priv = to_intel_bo(object_list[i]);
3692 if (atomic_read(&obj_priv->pending_flip) > 0) 3743 if (atomic_read(&obj_priv->pending_flip) > 0)
3693 break; 3744 break;
3694 } 3745 }
@@ -3797,7 +3848,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
3797 goto err; 3848 goto err;
3798 } 3849 }
3799 3850
3800 obj_priv = object_list[i]->driver_private; 3851 obj_priv = to_intel_bo(object_list[i]);
3801 if (obj_priv->in_execbuffer) { 3852 if (obj_priv->in_execbuffer) {
3802 DRM_ERROR("Object %p appears more than once in object list\n", 3853 DRM_ERROR("Object %p appears more than once in object list\n",
3803 object_list[i]); 3854 object_list[i]);
@@ -3923,7 +3974,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
3923 3974
3924 for (i = 0; i < args->buffer_count; i++) { 3975 for (i = 0; i < args->buffer_count; i++) {
3925 struct drm_gem_object *obj = object_list[i]; 3976 struct drm_gem_object *obj = object_list[i];
3926 struct drm_i915_gem_object *obj_priv = obj->driver_private; 3977 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
3927 uint32_t old_write_domain = obj->write_domain; 3978 uint32_t old_write_domain = obj->write_domain;
3928 3979
3929 obj->write_domain = obj->pending_write_domain; 3980 obj->write_domain = obj->pending_write_domain;
@@ -3998,7 +4049,7 @@ err:
3998 4049
3999 for (i = 0; i < args->buffer_count; i++) { 4050 for (i = 0; i < args->buffer_count; i++) {
4000 if (object_list[i]) { 4051 if (object_list[i]) {
4001 obj_priv = object_list[i]->driver_private; 4052 obj_priv = to_intel_bo(object_list[i]);
4002 obj_priv->in_execbuffer = false; 4053 obj_priv->in_execbuffer = false;
4003 } 4054 }
4004 drm_gem_object_unreference(object_list[i]); 4055 drm_gem_object_unreference(object_list[i]);
@@ -4176,7 +4227,7 @@ int
4176i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) 4227i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
4177{ 4228{
4178 struct drm_device *dev = obj->dev; 4229 struct drm_device *dev = obj->dev;
4179 struct drm_i915_gem_object *obj_priv = obj->driver_private; 4230 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
4180 int ret; 4231 int ret;
4181 4232
4182 i915_verify_inactive(dev, __FILE__, __LINE__); 4233 i915_verify_inactive(dev, __FILE__, __LINE__);
@@ -4209,7 +4260,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
4209{ 4260{
4210 struct drm_device *dev = obj->dev; 4261 struct drm_device *dev = obj->dev;
4211 drm_i915_private_t *dev_priv = dev->dev_private; 4262 drm_i915_private_t *dev_priv = dev->dev_private;
4212 struct drm_i915_gem_object *obj_priv = obj->driver_private; 4263 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
4213 4264
4214 i915_verify_inactive(dev, __FILE__, __LINE__); 4265 i915_verify_inactive(dev, __FILE__, __LINE__);
4215 obj_priv->pin_count--; 4266 obj_priv->pin_count--;
@@ -4249,7 +4300,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
4249 mutex_unlock(&dev->struct_mutex); 4300 mutex_unlock(&dev->struct_mutex);
4250 return -EBADF; 4301 return -EBADF;
4251 } 4302 }
4252 obj_priv = obj->driver_private; 4303 obj_priv = to_intel_bo(obj);
4253 4304
4254 if (obj_priv->madv != I915_MADV_WILLNEED) { 4305 if (obj_priv->madv != I915_MADV_WILLNEED) {
4255 DRM_ERROR("Attempting to pin a purgeable buffer\n"); 4306 DRM_ERROR("Attempting to pin a purgeable buffer\n");
@@ -4306,7 +4357,7 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
4306 return -EBADF; 4357 return -EBADF;
4307 } 4358 }
4308 4359
4309 obj_priv = obj->driver_private; 4360 obj_priv = to_intel_bo(obj);
4310 if (obj_priv->pin_filp != file_priv) { 4361 if (obj_priv->pin_filp != file_priv) {
4311 DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n", 4362 DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
4312 args->handle); 4363 args->handle);
@@ -4348,7 +4399,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
4348 */ 4399 */
4349 i915_gem_retire_requests(dev); 4400 i915_gem_retire_requests(dev);
4350 4401
4351 obj_priv = obj->driver_private; 4402 obj_priv = to_intel_bo(obj);
4352 /* Don't count being on the flushing list against the object being 4403 /* Don't count being on the flushing list against the object being
4353 * done. Otherwise, a buffer left on the flushing list but not getting 4404 * done. Otherwise, a buffer left on the flushing list but not getting
4354 * flushed (because nobody's flushing that domain) won't ever return 4405 * flushed (because nobody's flushing that domain) won't ever return
@@ -4394,7 +4445,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4394 } 4445 }
4395 4446
4396 mutex_lock(&dev->struct_mutex); 4447 mutex_lock(&dev->struct_mutex);
4397 obj_priv = obj->driver_private; 4448 obj_priv = to_intel_bo(obj);
4398 4449
4399 if (obj_priv->pin_count) { 4450 if (obj_priv->pin_count) {
4400 drm_gem_object_unreference(obj); 4451 drm_gem_object_unreference(obj);
@@ -4455,7 +4506,7 @@ int i915_gem_init_object(struct drm_gem_object *obj)
4455void i915_gem_free_object(struct drm_gem_object *obj) 4506void i915_gem_free_object(struct drm_gem_object *obj)
4456{ 4507{
4457 struct drm_device *dev = obj->dev; 4508 struct drm_device *dev = obj->dev;
4458 struct drm_i915_gem_object *obj_priv = obj->driver_private; 4509 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
4459 4510
4460 trace_i915_gem_object_destroy(obj); 4511 trace_i915_gem_object_destroy(obj);
4461 4512
@@ -4545,6 +4596,49 @@ i915_gem_idle(struct drm_device *dev)
4545 return 0; 4596 return 0;
4546} 4597}
4547 4598
4599/*
4600 * 965+ support PIPE_CONTROL commands, which provide finer grained control
4601 * over cache flushing.
4602 */
4603static int
4604i915_gem_init_pipe_control(struct drm_device *dev)
4605{
4606 drm_i915_private_t *dev_priv = dev->dev_private;
4607 struct drm_gem_object *obj;
4608 struct drm_i915_gem_object *obj_priv;
4609 int ret;
4610
4611 obj = drm_gem_object_alloc(dev, 4096);
4612 if (obj == NULL) {
4613 DRM_ERROR("Failed to allocate seqno page\n");
4614 ret = -ENOMEM;
4615 goto err;
4616 }
4617 obj_priv = to_intel_bo(obj);
4618 obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
4619
4620 ret = i915_gem_object_pin(obj, 4096);
4621 if (ret)
4622 goto err_unref;
4623
4624 dev_priv->seqno_gfx_addr = obj_priv->gtt_offset;
4625 dev_priv->seqno_page = kmap(obj_priv->pages[0]);
4626 if (dev_priv->seqno_page == NULL)
4627 goto err_unpin;
4628
4629 dev_priv->seqno_obj = obj;
4630 memset(dev_priv->seqno_page, 0, PAGE_SIZE);
4631
4632 return 0;
4633
4634err_unpin:
4635 i915_gem_object_unpin(obj);
4636err_unref:
4637 drm_gem_object_unreference(obj);
4638err:
4639 return ret;
4640}
4641
4548static int 4642static int
4549i915_gem_init_hws(struct drm_device *dev) 4643i915_gem_init_hws(struct drm_device *dev)
4550{ 4644{
@@ -4562,15 +4656,16 @@ i915_gem_init_hws(struct drm_device *dev)
4562 obj = drm_gem_object_alloc(dev, 4096); 4656 obj = drm_gem_object_alloc(dev, 4096);
4563 if (obj == NULL) { 4657 if (obj == NULL) {
4564 DRM_ERROR("Failed to allocate status page\n"); 4658 DRM_ERROR("Failed to allocate status page\n");
4565 return -ENOMEM; 4659 ret = -ENOMEM;
4660 goto err;
4566 } 4661 }
4567 obj_priv = obj->driver_private; 4662 obj_priv = to_intel_bo(obj);
4568 obj_priv->agp_type = AGP_USER_CACHED_MEMORY; 4663 obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
4569 4664
4570 ret = i915_gem_object_pin(obj, 4096); 4665 ret = i915_gem_object_pin(obj, 4096);
4571 if (ret != 0) { 4666 if (ret != 0) {
4572 drm_gem_object_unreference(obj); 4667 drm_gem_object_unreference(obj);
4573 return ret; 4668 goto err_unref;
4574 } 4669 }
4575 4670
4576 dev_priv->status_gfx_addr = obj_priv->gtt_offset; 4671 dev_priv->status_gfx_addr = obj_priv->gtt_offset;
@@ -4579,10 +4674,16 @@ i915_gem_init_hws(struct drm_device *dev)
4579 if (dev_priv->hw_status_page == NULL) { 4674 if (dev_priv->hw_status_page == NULL) {
4580 DRM_ERROR("Failed to map status page.\n"); 4675 DRM_ERROR("Failed to map status page.\n");
4581 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); 4676 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
4582 i915_gem_object_unpin(obj); 4677 ret = -EINVAL;
4583 drm_gem_object_unreference(obj); 4678 goto err_unpin;
4584 return -EINVAL;
4585 } 4679 }
4680
4681 if (HAS_PIPE_CONTROL(dev)) {
4682 ret = i915_gem_init_pipe_control(dev);
4683 if (ret)
4684 goto err_unpin;
4685 }
4686
4586 dev_priv->hws_obj = obj; 4687 dev_priv->hws_obj = obj;
4587 memset(dev_priv->hw_status_page, 0, PAGE_SIZE); 4688 memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
4588 if (IS_GEN6(dev)) { 4689 if (IS_GEN6(dev)) {
@@ -4595,6 +4696,30 @@ i915_gem_init_hws(struct drm_device *dev)
4595 DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr); 4696 DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
4596 4697
4597 return 0; 4698 return 0;
4699
4700err_unpin:
4701 i915_gem_object_unpin(obj);
4702err_unref:
4703 drm_gem_object_unreference(obj);
4704err:
4705 return 0;
4706}
4707
4708static void
4709i915_gem_cleanup_pipe_control(struct drm_device *dev)
4710{
4711 drm_i915_private_t *dev_priv = dev->dev_private;
4712 struct drm_gem_object *obj;
4713 struct drm_i915_gem_object *obj_priv;
4714
4715 obj = dev_priv->seqno_obj;
4716 obj_priv = to_intel_bo(obj);
4717 kunmap(obj_priv->pages[0]);
4718 i915_gem_object_unpin(obj);
4719 drm_gem_object_unreference(obj);
4720 dev_priv->seqno_obj = NULL;
4721
4722 dev_priv->seqno_page = NULL;
4598} 4723}
4599 4724
4600static void 4725static void
@@ -4608,7 +4733,7 @@ i915_gem_cleanup_hws(struct drm_device *dev)
4608 return; 4733 return;
4609 4734
4610 obj = dev_priv->hws_obj; 4735 obj = dev_priv->hws_obj;
4611 obj_priv = obj->driver_private; 4736 obj_priv = to_intel_bo(obj);
4612 4737
4613 kunmap(obj_priv->pages[0]); 4738 kunmap(obj_priv->pages[0]);
4614 i915_gem_object_unpin(obj); 4739 i915_gem_object_unpin(obj);
@@ -4618,6 +4743,9 @@ i915_gem_cleanup_hws(struct drm_device *dev)
4618 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); 4743 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
4619 dev_priv->hw_status_page = NULL; 4744 dev_priv->hw_status_page = NULL;
4620 4745
4746 if (HAS_PIPE_CONTROL(dev))
4747 i915_gem_cleanup_pipe_control(dev);
4748
4621 /* Write high address into HWS_PGA when disabling. */ 4749 /* Write high address into HWS_PGA when disabling. */
4622 I915_WRITE(HWS_PGA, 0x1ffff000); 4750 I915_WRITE(HWS_PGA, 0x1ffff000);
4623} 4751}
@@ -4642,7 +4770,7 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
4642 i915_gem_cleanup_hws(dev); 4770 i915_gem_cleanup_hws(dev);
4643 return -ENOMEM; 4771 return -ENOMEM;
4644 } 4772 }
4645 obj_priv = obj->driver_private; 4773 obj_priv = to_intel_bo(obj);
4646 4774
4647 ret = i915_gem_object_pin(obj, 4096); 4775 ret = i915_gem_object_pin(obj, 4096);
4648 if (ret != 0) { 4776 if (ret != 0) {
@@ -4935,7 +5063,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
4935 int ret; 5063 int ret;
4936 int page_count; 5064 int page_count;
4937 5065
4938 obj_priv = obj->driver_private; 5066 obj_priv = to_intel_bo(obj);
4939 if (!obj_priv->phys_obj) 5067 if (!obj_priv->phys_obj)
4940 return; 5068 return;
4941 5069
@@ -4974,7 +5102,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
4974 if (id > I915_MAX_PHYS_OBJECT) 5102 if (id > I915_MAX_PHYS_OBJECT)
4975 return -EINVAL; 5103 return -EINVAL;
4976 5104
4977 obj_priv = obj->driver_private; 5105 obj_priv = to_intel_bo(obj);
4978 5106
4979 if (obj_priv->phys_obj) { 5107 if (obj_priv->phys_obj) {
4980 if (obj_priv->phys_obj->id == id) 5108 if (obj_priv->phys_obj->id == id)
@@ -5025,7 +5153,7 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
5025 struct drm_i915_gem_pwrite *args, 5153 struct drm_i915_gem_pwrite *args,
5026 struct drm_file *file_priv) 5154 struct drm_file *file_priv)
5027{ 5155{
5028 struct drm_i915_gem_object *obj_priv = obj->driver_private; 5156 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
5029 void *obj_addr; 5157 void *obj_addr;
5030 int ret; 5158 int ret;
5031 char __user *user_data; 5159 char __user *user_data;
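The i915_gem.c hunks above are dominated by a mechanical conversion from obj->driver_private to the to_intel_bo() accessor. The helper itself is defined in i915_drv.h, not shown in this excerpt; at this stage it is presumably still a thin cast over the same driver_private pointer, roughly:

	/* Assumed shape of the accessor; the real definition lives in i915_drv.h. */
	#define to_intel_bo(obj) ((struct drm_i915_gem_object *)(obj)->driver_private)

Funnelling every lookup through one helper means that any later change to how the driver-private object is reached only has to touch this macro, not every call site.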
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index e602614bd3f8..35507cf53fa3 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -72,7 +72,7 @@ void
72i915_gem_dump_object(struct drm_gem_object *obj, int len, 72i915_gem_dump_object(struct drm_gem_object *obj, int len,
73 const char *where, uint32_t mark) 73 const char *where, uint32_t mark)
74{ 74{
75 struct drm_i915_gem_object *obj_priv = obj->driver_private; 75 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
76 int page; 76 int page;
77 77
78 DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset); 78 DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset);
@@ -137,7 +137,7 @@ void
137i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle) 137i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
138{ 138{
139 struct drm_device *dev = obj->dev; 139 struct drm_device *dev = obj->dev;
140 struct drm_i915_gem_object *obj_priv = obj->driver_private; 140 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
141 int page; 141 int page;
142 uint32_t *gtt_mapping; 142 uint32_t *gtt_mapping;
143 uint32_t *backing_map = NULL; 143 uint32_t *backing_map = NULL;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index c01c878e51ba..4bdccefcf2cf 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -202,21 +202,17 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
202 * reg, so dont bother to check the size */ 202 * reg, so dont bother to check the size */
203 if (stride / 128 > I965_FENCE_MAX_PITCH_VAL) 203 if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
204 return false; 204 return false;
205 } else if (IS_I9XX(dev)) { 205 } else if (IS_GEN3(dev) || IS_GEN2(dev)) {
206 uint32_t pitch_val = ffs(stride / tile_width) - 1; 206 if (stride > 8192)
207
208 /* XXX: For Y tiling, FENCE_MAX_PITCH_VAL is actually 6 (8KB)
209 * instead of 4 (2KB) on 945s.
210 */
211 if (pitch_val > I915_FENCE_MAX_PITCH_VAL ||
212 size > (I830_FENCE_MAX_SIZE_VAL << 20))
213 return false; 207 return false;
214 } else {
215 uint32_t pitch_val = ffs(stride / tile_width) - 1;
216 208
217 if (pitch_val > I830_FENCE_MAX_PITCH_VAL || 209 if (IS_GEN3(dev)) {
218 size > (I830_FENCE_MAX_SIZE_VAL << 19)) 210 if (size > I830_FENCE_MAX_SIZE_VAL << 20)
219 return false; 211 return false;
212 } else {
213 if (size > I830_FENCE_MAX_SIZE_VAL << 19)
214 return false;
215 }
220 } 216 }
221 217
222 /* 965+ just needs multiples of tile width */ 218 /* 965+ just needs multiples of tile width */
@@ -240,7 +236,7 @@ bool
240i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode) 236i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode)
241{ 237{
242 struct drm_device *dev = obj->dev; 238 struct drm_device *dev = obj->dev;
243 struct drm_i915_gem_object *obj_priv = obj->driver_private; 239 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
244 240
245 if (obj_priv->gtt_space == NULL) 241 if (obj_priv->gtt_space == NULL)
246 return true; 242 return true;
@@ -280,7 +276,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
280 obj = drm_gem_object_lookup(dev, file_priv, args->handle); 276 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
281 if (obj == NULL) 277 if (obj == NULL)
282 return -EINVAL; 278 return -EINVAL;
283 obj_priv = obj->driver_private; 279 obj_priv = to_intel_bo(obj);
284 280
285 if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) { 281 if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) {
286 drm_gem_object_unreference_unlocked(obj); 282 drm_gem_object_unreference_unlocked(obj);
@@ -364,7 +360,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
364 obj = drm_gem_object_lookup(dev, file_priv, args->handle); 360 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
365 if (obj == NULL) 361 if (obj == NULL)
366 return -EINVAL; 362 return -EINVAL;
367 obj_priv = obj->driver_private; 363 obj_priv = to_intel_bo(obj);
368 364
369 mutex_lock(&dev->struct_mutex); 365 mutex_lock(&dev->struct_mutex);
370 366
@@ -427,7 +423,7 @@ i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj)
427{ 423{
428 struct drm_device *dev = obj->dev; 424 struct drm_device *dev = obj->dev;
429 drm_i915_private_t *dev_priv = dev->dev_private; 425 drm_i915_private_t *dev_priv = dev->dev_private;
430 struct drm_i915_gem_object *obj_priv = obj->driver_private; 426 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
431 int page_count = obj->size >> PAGE_SHIFT; 427 int page_count = obj->size >> PAGE_SHIFT;
432 int i; 428 int i;
433 429
@@ -456,7 +452,7 @@ i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj)
456{ 452{
457 struct drm_device *dev = obj->dev; 453 struct drm_device *dev = obj->dev;
458 drm_i915_private_t *dev_priv = dev->dev_private; 454 drm_i915_private_t *dev_priv = dev->dev_private;
459 struct drm_i915_gem_object *obj_priv = obj->driver_private; 455 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
460 int page_count = obj->size >> PAGE_SHIFT; 456 int page_count = obj->size >> PAGE_SHIFT;
461 int i; 457 int i;
462 458
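The rewritten branch in i915_tiling_ok() above drops the ffs()-based pitch computation and checks the gen2/gen3 fence limits directly: strides above 8 KiB are rejected outright, and the object size is bounded by I830_FENCE_MAX_SIZE_VAL << 20 on gen3 versus << 19 on gen2 (256 MiB and 128 MiB respectively, given the (1<<8) definition visible in the i915_reg.h context further down). A standalone restatement of that logic, with the helper name and is_gen3 flag invented purely for illustration:

	#include <stdbool.h>
	#include <stddef.h>

	#define I830_FENCE_MAX_SIZE_VAL (1 << 8)	/* as in i915_reg.h */

	/* Illustrative only: mirrors the gen2/gen3 branch added above. */
	static bool gen2_gen3_fence_limits_ok(size_t stride, size_t size, bool is_gen3)
	{
		if (stride > 8192)		/* maximum fenceable stride */
			return false;

		if (is_gen3)
			return size <= ((size_t)I830_FENCE_MAX_SIZE_VAL << 20);

		return size <= ((size_t)I830_FENCE_MAX_SIZE_VAL << 19);
	}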
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 5388354da0d1..2b8b969d0c15 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -27,6 +27,7 @@
27 */ 27 */
28 28
29#include <linux/sysrq.h> 29#include <linux/sysrq.h>
30#include <linux/slab.h>
30#include "drmP.h" 31#include "drmP.h"
31#include "drm.h" 32#include "drm.h"
32#include "i915_drm.h" 33#include "i915_drm.h"
@@ -259,10 +260,10 @@ static void i915_hotplug_work_func(struct work_struct *work)
259 260
260 if (mode_config->num_connector) { 261 if (mode_config->num_connector) {
261 list_for_each_entry(connector, &mode_config->connector_list, head) { 262 list_for_each_entry(connector, &mode_config->connector_list, head) {
262 struct intel_output *intel_output = to_intel_output(connector); 263 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
263 264
264 if (intel_output->hot_plug) 265 if (intel_encoder->hot_plug)
265 (*intel_output->hot_plug) (intel_output); 266 (*intel_encoder->hot_plug) (intel_encoder);
266 } 267 }
267 } 268 }
268 /* Just fire off a uevent and let userspace tell us what to do */ 269 /* Just fire off a uevent and let userspace tell us what to do */
@@ -348,7 +349,7 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
348 READ_BREADCRUMB(dev_priv); 349 READ_BREADCRUMB(dev_priv);
349 } 350 }
350 351
351 if (gt_iir & GT_USER_INTERRUPT) { 352 if (gt_iir & GT_PIPE_NOTIFY) {
352 u32 seqno = i915_get_gem_seqno(dev); 353 u32 seqno = i915_get_gem_seqno(dev);
353 dev_priv->mm.irq_gem_seqno = seqno; 354 dev_priv->mm.irq_gem_seqno = seqno;
354 trace_i915_gem_request_complete(dev, seqno); 355 trace_i915_gem_request_complete(dev, seqno);
@@ -443,7 +444,7 @@ i915_error_object_create(struct drm_device *dev,
443 if (src == NULL) 444 if (src == NULL)
444 return NULL; 445 return NULL;
445 446
446 src_priv = src->driver_private; 447 src_priv = to_intel_bo(src);
447 if (src_priv->pages == NULL) 448 if (src_priv->pages == NULL)
448 return NULL; 449 return NULL;
449 450
@@ -1004,7 +1005,7 @@ void i915_user_irq_get(struct drm_device *dev)
1004 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 1005 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
1005 if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) { 1006 if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) {
1006 if (HAS_PCH_SPLIT(dev)) 1007 if (HAS_PCH_SPLIT(dev))
1007 ironlake_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT); 1008 ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
1008 else 1009 else
1009 i915_enable_irq(dev_priv, I915_USER_INTERRUPT); 1010 i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
1010 } 1011 }
@@ -1020,7 +1021,7 @@ void i915_user_irq_put(struct drm_device *dev)
1020 BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0); 1021 BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
1021 if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) { 1022 if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
1022 if (HAS_PCH_SPLIT(dev)) 1023 if (HAS_PCH_SPLIT(dev))
1023 ironlake_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT); 1024 ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
1024 else 1025 else
1025 i915_disable_irq(dev_priv, I915_USER_INTERRUPT); 1026 i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
1026 } 1027 }
@@ -1304,7 +1305,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
1304 /* enable kind of interrupts always enabled */ 1305 /* enable kind of interrupts always enabled */
1305 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 1306 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
1306 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; 1307 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
1307 u32 render_mask = GT_USER_INTERRUPT; 1308 u32 render_mask = GT_PIPE_NOTIFY;
1308 u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | 1309 u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
1309 SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; 1310 SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
1310 1311
diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/i915_opregion.c
index 7cc8410239cb..8fcc75c1aa28 100644
--- a/drivers/gpu/drm/i915/i915_opregion.c
+++ b/drivers/gpu/drm/i915/i915_opregion.c
@@ -382,8 +382,57 @@ static void intel_didl_outputs(struct drm_device *dev)
382 struct drm_i915_private *dev_priv = dev->dev_private; 382 struct drm_i915_private *dev_priv = dev->dev_private;
383 struct intel_opregion *opregion = &dev_priv->opregion; 383 struct intel_opregion *opregion = &dev_priv->opregion;
384 struct drm_connector *connector; 384 struct drm_connector *connector;
385 acpi_handle handle;
386 struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL;
387 unsigned long long device_id;
388 acpi_status status;
385 int i = 0; 389 int i = 0;
386 390
391 handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev);
392 if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev)))
393 return;
394
395 if (acpi_is_video_device(acpi_dev))
396 acpi_video_bus = acpi_dev;
397 else {
398 list_for_each_entry(acpi_cdev, &acpi_dev->children, node) {
399 if (acpi_is_video_device(acpi_cdev)) {
400 acpi_video_bus = acpi_cdev;
401 break;
402 }
403 }
404 }
405
406 if (!acpi_video_bus) {
407 printk(KERN_WARNING "No ACPI video bus found\n");
408 return;
409 }
410
411 list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) {
412 if (i >= 8) {
413 dev_printk (KERN_ERR, &dev->pdev->dev,
414 "More than 8 outputs detected\n");
415 return;
416 }
417 status =
418 acpi_evaluate_integer(acpi_cdev->handle, "_ADR",
419 NULL, &device_id);
420 if (ACPI_SUCCESS(status)) {
421 if (!device_id)
422 goto blind_set;
423 opregion->acpi->didl[i] = (u32)(device_id & 0x0f0f);
424 i++;
425 }
426 }
427
428end:
429 /* If fewer than 8 outputs, the list must be null terminated */
430 if (i < 8)
431 opregion->acpi->didl[i] = 0;
432 return;
433
434blind_set:
435 i = 0;
387 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 436 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
388 int output_type = ACPI_OTHER_OUTPUT; 437 int output_type = ACPI_OTHER_OUTPUT;
389 if (i >= 8) { 438 if (i >= 8) {
@@ -416,10 +465,7 @@ static void intel_didl_outputs(struct drm_device *dev)
416 opregion->acpi->didl[i] |= (1<<31) | output_type | i; 465 opregion->acpi->didl[i] |= (1<<31) | output_type | i;
417 i++; 466 i++;
418 } 467 }
419 468 goto end;
420 /* If fewer than 8 outputs, the list must be null terminated */
421 if (i < 8)
422 opregion->acpi->didl[i] = 0;
423} 469}
424 470
425int intel_opregion_init(struct drm_device *dev, int resume) 471int intel_opregion_init(struct drm_device *dev, int resume)
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index cbbf59f56dfa..4cbc5210fd30 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -230,6 +230,16 @@
230#define ASYNC_FLIP (1<<22) 230#define ASYNC_FLIP (1<<22)
231#define DISPLAY_PLANE_A (0<<20) 231#define DISPLAY_PLANE_A (0<<20)
232#define DISPLAY_PLANE_B (1<<20) 232#define DISPLAY_PLANE_B (1<<20)
233#define GFX_OP_PIPE_CONTROL ((0x3<<29)|(0x3<<27)|(0x2<<24)|2)
234#define PIPE_CONTROL_QW_WRITE (1<<14)
235#define PIPE_CONTROL_DEPTH_STALL (1<<13)
236#define PIPE_CONTROL_WC_FLUSH (1<<12)
237#define PIPE_CONTROL_IS_FLUSH (1<<11) /* MBZ on Ironlake */
238#define PIPE_CONTROL_TC_FLUSH (1<<10) /* GM45+ only */
239#define PIPE_CONTROL_ISP_DIS (1<<9)
240#define PIPE_CONTROL_NOTIFY (1<<8)
241#define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */
242#define PIPE_CONTROL_STALL_EN (1<<1) /* in addr word, Ironlake+ only */
233 243
234/* 244/*
235 * Fence registers 245 * Fence registers
@@ -241,7 +251,7 @@
241#define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8) 251#define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8)
242#define I830_FENCE_PITCH_SHIFT 4 252#define I830_FENCE_PITCH_SHIFT 4
243#define I830_FENCE_REG_VALID (1<<0) 253#define I830_FENCE_REG_VALID (1<<0)
244#define I915_FENCE_MAX_PITCH_VAL 0x10 254#define I915_FENCE_MAX_PITCH_VAL 4
245#define I830_FENCE_MAX_PITCH_VAL 6 255#define I830_FENCE_MAX_PITCH_VAL 6
246#define I830_FENCE_MAX_SIZE_VAL (1<<8) 256#define I830_FENCE_MAX_SIZE_VAL (1<<8)
247 257
@@ -2285,6 +2295,7 @@
2285#define DEIER 0x4400c 2295#define DEIER 0x4400c
2286 2296
2287/* GT interrupt */ 2297/* GT interrupt */
2298#define GT_PIPE_NOTIFY (1 << 4)
2288#define GT_SYNC_STATUS (1 << 2) 2299#define GT_SYNC_STATUS (1 << 2)
2289#define GT_USER_INTERRUPT (1 << 0) 2300#define GT_USER_INTERRUPT (1 << 0)
2290 2301
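The new PIPE_CONTROL bits pair with GT_PIPE_NOTIFY: on Ironlake the driver now lands the request seqno through a PIPE_CONTROL quad-word write into the dedicated seqno page set up by i915_gem_init_pipe_control() earlier in this diff, and the i915_irq.c hunks above switch the render interrupt from GT_USER_INTERRUPT to the pipe-notify source accordingly. The actual emission sequence sits in code this excerpt does not show; the following is only a rough sketch of a single such command, using BEGIN_LP_RING/OUT_RING/ADVANCE_LP_RING as the ring macros assumed from the rest of the driver:

	/* Sketch only -- not the driver's exact sequence.  GFX_OP_PIPE_CONTROL
	 * encodes a 4-dword command (length field of 2), hence BEGIN_LP_RING(4). */
	BEGIN_LP_RING(4);
	OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
		 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_NOTIFY);
	OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);	/* destination */
	OUT_RING(seqno);						/* value written */
	OUT_RING(0);
	ADVANCE_LP_RING();

When the notify fires, the GT_PIPE_NOTIFY branch shown in the ironlake_irq_handler() hunk picks up the new seqno via i915_get_gem_seqno() instead of relying on the MI user interrupt.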
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index fccf07470c8f..759c2ef72eff 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -25,6 +25,7 @@
25 */ 25 */
26 26
27#include <linux/i2c.h> 27#include <linux/i2c.h>
28#include <linux/slab.h>
28#include "drmP.h" 29#include "drmP.h"
29#include "drm.h" 30#include "drm.h"
30#include "drm_crtc.h" 31#include "drm_crtc.h"
@@ -246,19 +247,19 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
246 247
247static bool intel_crt_detect_ddc(struct drm_connector *connector) 248static bool intel_crt_detect_ddc(struct drm_connector *connector)
248{ 249{
249 struct intel_output *intel_output = to_intel_output(connector); 250 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
250 251
251 /* CRT should always be at 0, but check anyway */ 252 /* CRT should always be at 0, but check anyway */
252 if (intel_output->type != INTEL_OUTPUT_ANALOG) 253 if (intel_encoder->type != INTEL_OUTPUT_ANALOG)
253 return false; 254 return false;
254 255
255 return intel_ddc_probe(intel_output); 256 return intel_ddc_probe(intel_encoder);
256} 257}
257 258
258static enum drm_connector_status 259static enum drm_connector_status
259intel_crt_load_detect(struct drm_crtc *crtc, struct intel_output *intel_output) 260intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder)
260{ 261{
261 struct drm_encoder *encoder = &intel_output->enc; 262 struct drm_encoder *encoder = &intel_encoder->enc;
262 struct drm_device *dev = encoder->dev; 263 struct drm_device *dev = encoder->dev;
263 struct drm_i915_private *dev_priv = dev->dev_private; 264 struct drm_i915_private *dev_priv = dev->dev_private;
264 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 265 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -386,8 +387,8 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_output *intel_output)
386static enum drm_connector_status intel_crt_detect(struct drm_connector *connector) 387static enum drm_connector_status intel_crt_detect(struct drm_connector *connector)
387{ 388{
388 struct drm_device *dev = connector->dev; 389 struct drm_device *dev = connector->dev;
389 struct intel_output *intel_output = to_intel_output(connector); 390 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
390 struct drm_encoder *encoder = &intel_output->enc; 391 struct drm_encoder *encoder = &intel_encoder->enc;
391 struct drm_crtc *crtc; 392 struct drm_crtc *crtc;
392 int dpms_mode; 393 int dpms_mode;
393 enum drm_connector_status status; 394 enum drm_connector_status status;
@@ -404,13 +405,13 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connecto
404 405
405 /* for pre-945g platforms use load detect */ 406 /* for pre-945g platforms use load detect */
406 if (encoder->crtc && encoder->crtc->enabled) { 407 if (encoder->crtc && encoder->crtc->enabled) {
407 status = intel_crt_load_detect(encoder->crtc, intel_output); 408 status = intel_crt_load_detect(encoder->crtc, intel_encoder);
408 } else { 409 } else {
409 crtc = intel_get_load_detect_pipe(intel_output, 410 crtc = intel_get_load_detect_pipe(intel_encoder,
410 NULL, &dpms_mode); 411 NULL, &dpms_mode);
411 if (crtc) { 412 if (crtc) {
412 status = intel_crt_load_detect(crtc, intel_output); 413 status = intel_crt_load_detect(crtc, intel_encoder);
413 intel_release_load_detect_pipe(intel_output, dpms_mode); 414 intel_release_load_detect_pipe(intel_encoder, dpms_mode);
414 } else 415 } else
415 status = connector_status_unknown; 416 status = connector_status_unknown;
416 } 417 }
@@ -420,9 +421,9 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connecto
420 421
421static void intel_crt_destroy(struct drm_connector *connector) 422static void intel_crt_destroy(struct drm_connector *connector)
422{ 423{
423 struct intel_output *intel_output = to_intel_output(connector); 424 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
424 425
425 intel_i2c_destroy(intel_output->ddc_bus); 426 intel_i2c_destroy(intel_encoder->ddc_bus);
426 drm_sysfs_connector_remove(connector); 427 drm_sysfs_connector_remove(connector);
427 drm_connector_cleanup(connector); 428 drm_connector_cleanup(connector);
428 kfree(connector); 429 kfree(connector);
@@ -431,28 +432,28 @@ static void intel_crt_destroy(struct drm_connector *connector)
431static int intel_crt_get_modes(struct drm_connector *connector) 432static int intel_crt_get_modes(struct drm_connector *connector)
432{ 433{
433 int ret; 434 int ret;
434 struct intel_output *intel_output = to_intel_output(connector); 435 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
435 struct i2c_adapter *ddcbus; 436 struct i2c_adapter *ddcbus;
436 struct drm_device *dev = connector->dev; 437 struct drm_device *dev = connector->dev;
437 438
438 439
439 ret = intel_ddc_get_modes(intel_output); 440 ret = intel_ddc_get_modes(intel_encoder);
440 if (ret || !IS_G4X(dev)) 441 if (ret || !IS_G4X(dev))
441 goto end; 442 goto end;
442 443
443 ddcbus = intel_output->ddc_bus; 444 ddcbus = intel_encoder->ddc_bus;
444 /* Try to probe digital port for output in DVI-I -> VGA mode. */ 445 /* Try to probe digital port for output in DVI-I -> VGA mode. */
445 intel_output->ddc_bus = 446 intel_encoder->ddc_bus =
446 intel_i2c_create(connector->dev, GPIOD, "CRTDDC_D"); 447 intel_i2c_create(connector->dev, GPIOD, "CRTDDC_D");
447 448
448 if (!intel_output->ddc_bus) { 449 if (!intel_encoder->ddc_bus) {
449 intel_output->ddc_bus = ddcbus; 450 intel_encoder->ddc_bus = ddcbus;
450 dev_printk(KERN_ERR, &connector->dev->pdev->dev, 451 dev_printk(KERN_ERR, &connector->dev->pdev->dev,
451 "DDC bus registration failed for CRTDDC_D.\n"); 452 "DDC bus registration failed for CRTDDC_D.\n");
452 goto end; 453 goto end;
453 } 454 }
454 /* Try to get modes by GPIOD port */ 455 /* Try to get modes by GPIOD port */
455 ret = intel_ddc_get_modes(intel_output); 456 ret = intel_ddc_get_modes(intel_encoder);
456 intel_i2c_destroy(ddcbus); 457 intel_i2c_destroy(ddcbus);
457 458
458end: 459end:
@@ -505,23 +506,23 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = {
505void intel_crt_init(struct drm_device *dev) 506void intel_crt_init(struct drm_device *dev)
506{ 507{
507 struct drm_connector *connector; 508 struct drm_connector *connector;
508 struct intel_output *intel_output; 509 struct intel_encoder *intel_encoder;
509 struct drm_i915_private *dev_priv = dev->dev_private; 510 struct drm_i915_private *dev_priv = dev->dev_private;
510 u32 i2c_reg; 511 u32 i2c_reg;
511 512
512 intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL); 513 intel_encoder = kzalloc(sizeof(struct intel_encoder), GFP_KERNEL);
513 if (!intel_output) 514 if (!intel_encoder)
514 return; 515 return;
515 516
516 connector = &intel_output->base; 517 connector = &intel_encoder->base;
517 drm_connector_init(dev, &intel_output->base, 518 drm_connector_init(dev, &intel_encoder->base,
518 &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); 519 &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
519 520
520 drm_encoder_init(dev, &intel_output->enc, &intel_crt_enc_funcs, 521 drm_encoder_init(dev, &intel_encoder->enc, &intel_crt_enc_funcs,
521 DRM_MODE_ENCODER_DAC); 522 DRM_MODE_ENCODER_DAC);
522 523
523 drm_mode_connector_attach_encoder(&intel_output->base, 524 drm_mode_connector_attach_encoder(&intel_encoder->base,
524 &intel_output->enc); 525 &intel_encoder->enc);
525 526
526 /* Set up the DDC bus. */ 527 /* Set up the DDC bus. */
527 if (HAS_PCH_SPLIT(dev)) 528 if (HAS_PCH_SPLIT(dev))
@@ -532,22 +533,22 @@ void intel_crt_init(struct drm_device *dev)
532 if (dev_priv->crt_ddc_bus != 0) 533 if (dev_priv->crt_ddc_bus != 0)
533 i2c_reg = dev_priv->crt_ddc_bus; 534 i2c_reg = dev_priv->crt_ddc_bus;
534 } 535 }
535 intel_output->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A"); 536 intel_encoder->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A");
536 if (!intel_output->ddc_bus) { 537 if (!intel_encoder->ddc_bus) {
537 dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration " 538 dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
538 "failed.\n"); 539 "failed.\n");
539 return; 540 return;
540 } 541 }
541 542
542 intel_output->type = INTEL_OUTPUT_ANALOG; 543 intel_encoder->type = INTEL_OUTPUT_ANALOG;
543 intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | 544 intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
544 (1 << INTEL_ANALOG_CLONE_BIT) | 545 (1 << INTEL_ANALOG_CLONE_BIT) |
545 (1 << INTEL_SDVO_LVDS_CLONE_BIT); 546 (1 << INTEL_SDVO_LVDS_CLONE_BIT);
546 intel_output->crtc_mask = (1 << 0) | (1 << 1); 547 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
547 connector->interlace_allowed = 0; 548 connector->interlace_allowed = 0;
548 connector->doublescan_allowed = 0; 549 connector->doublescan_allowed = 0;
549 550
550 drm_encoder_helper_add(&intel_output->enc, &intel_crt_helper_funcs); 551 drm_encoder_helper_add(&intel_encoder->enc, &intel_crt_helper_funcs);
551 drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); 552 drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
552 553
553 drm_sysfs_connector_add(connector); 554 drm_sysfs_connector_add(connector);
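The intel_crt.c changes above (and the intel_display.c changes that follow) are a mechanical rename of struct intel_output to struct intel_encoder, with to_intel_output() becoming to_intel_encoder(); no behaviour changes. At this stage the renamed structure evidently still embeds the connector -- drm_connector_init() is called on &intel_encoder->base and drm_encoder_init() on &intel_encoder->enc -- so to_intel_encoder() presumably remains a connector-based container_of, roughly:

	/* Assumed shape after the rename; the real definition is in intel_drv.h,
	 * outside this excerpt. */
	struct intel_encoder {
		struct drm_connector base;	/* still the embedded base object */
		struct drm_encoder enc;
		int type;
		/* ddc_bus, clone_mask, crtc_mask, hot_plug, ... */
	};
	#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)

That is why all the to_intel_encoder(connector) call sites in these hunks still take a drm_connector rather than a drm_encoder.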
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 58fc7fa0eb1d..c7502b6b1600 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -28,6 +28,7 @@
28#include <linux/input.h> 28#include <linux/input.h>
29#include <linux/i2c.h> 29#include <linux/i2c.h>
30#include <linux/kernel.h> 30#include <linux/kernel.h>
31#include <linux/slab.h>
31#include "drmP.h" 32#include "drmP.h"
32#include "intel_drv.h" 33#include "intel_drv.h"
33#include "i915_drm.h" 34#include "i915_drm.h"
@@ -746,16 +747,16 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type)
746 list_for_each_entry(l_entry, &mode_config->connector_list, head) { 747 list_for_each_entry(l_entry, &mode_config->connector_list, head) {
747 if (l_entry->encoder && 748 if (l_entry->encoder &&
748 l_entry->encoder->crtc == crtc) { 749 l_entry->encoder->crtc == crtc) {
749 struct intel_output *intel_output = to_intel_output(l_entry); 750 struct intel_encoder *intel_encoder = to_intel_encoder(l_entry);
750 if (intel_output->type == type) 751 if (intel_encoder->type == type)
751 return true; 752 return true;
752 } 753 }
753 } 754 }
754 return false; 755 return false;
755} 756}
756 757
757struct drm_connector * 758static struct drm_connector *
758intel_pipe_get_output (struct drm_crtc *crtc) 759intel_pipe_get_connector (struct drm_crtc *crtc)
759{ 760{
760 struct drm_device *dev = crtc->dev; 761 struct drm_device *dev = crtc->dev;
761 struct drm_mode_config *mode_config = &dev->mode_config; 762 struct drm_mode_config *mode_config = &dev->mode_config;
@@ -1002,7 +1003,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1002 struct drm_i915_private *dev_priv = dev->dev_private; 1003 struct drm_i915_private *dev_priv = dev->dev_private;
1003 struct drm_framebuffer *fb = crtc->fb; 1004 struct drm_framebuffer *fb = crtc->fb;
1004 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 1005 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1005 struct drm_i915_gem_object *obj_priv = intel_fb->obj->driver_private; 1006 struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
1006 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1007 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1007 int plane, i; 1008 int plane, i;
1008 u32 fbc_ctl, fbc_ctl2; 1009 u32 fbc_ctl, fbc_ctl2;
@@ -1079,7 +1080,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1079 struct drm_i915_private *dev_priv = dev->dev_private; 1080 struct drm_i915_private *dev_priv = dev->dev_private;
1080 struct drm_framebuffer *fb = crtc->fb; 1081 struct drm_framebuffer *fb = crtc->fb;
1081 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 1082 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1082 struct drm_i915_gem_object *obj_priv = intel_fb->obj->driver_private; 1083 struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
1083 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1084 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1084 int plane = (intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : 1085 int plane = (intel_crtc->plane == 0 ? DPFC_CTL_PLANEA :
1085 DPFC_CTL_PLANEB); 1086 DPFC_CTL_PLANEB);
@@ -1175,7 +1176,7 @@ static void intel_update_fbc(struct drm_crtc *crtc,
1175 return; 1176 return;
1176 1177
1177 intel_fb = to_intel_framebuffer(fb); 1178 intel_fb = to_intel_framebuffer(fb);
1178 obj_priv = intel_fb->obj->driver_private; 1179 obj_priv = to_intel_bo(intel_fb->obj);
1179 1180
1180 /* 1181 /*
1181 * If FBC is already on, we just have to verify that we can 1182 * If FBC is already on, we just have to verify that we can
@@ -1242,7 +1243,7 @@ out_disable:
1242static int 1243static int
1243intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj) 1244intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
1244{ 1245{
1245 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1246 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1246 u32 alignment; 1247 u32 alignment;
1247 int ret; 1248 int ret;
1248 1249
@@ -1322,7 +1323,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1322 1323
1323 intel_fb = to_intel_framebuffer(crtc->fb); 1324 intel_fb = to_intel_framebuffer(crtc->fb);
1324 obj = intel_fb->obj; 1325 obj = intel_fb->obj;
1325 obj_priv = obj->driver_private; 1326 obj_priv = to_intel_bo(obj);
1326 1327
1327 mutex_lock(&dev->struct_mutex); 1328 mutex_lock(&dev->struct_mutex);
1328 ret = intel_pin_and_fence_fb_obj(dev, obj); 1329 ret = intel_pin_and_fence_fb_obj(dev, obj);
@@ -1400,7 +1401,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1400 1401
1401 if (old_fb) { 1402 if (old_fb) {
1402 intel_fb = to_intel_framebuffer(old_fb); 1403 intel_fb = to_intel_framebuffer(old_fb);
1403 obj_priv = intel_fb->obj->driver_private; 1404 obj_priv = to_intel_bo(intel_fb->obj);
1404 i915_gem_object_unpin(intel_fb->obj); 1405 i915_gem_object_unpin(intel_fb->obj);
1405 } 1406 }
1406 intel_increase_pllclock(crtc, true); 1407 intel_increase_pllclock(crtc, true);
@@ -2916,7 +2917,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2916 int dspsize_reg = (plane == 0) ? DSPASIZE : DSPBSIZE; 2917 int dspsize_reg = (plane == 0) ? DSPASIZE : DSPBSIZE;
2917 int dsppos_reg = (plane == 0) ? DSPAPOS : DSPBPOS; 2918 int dsppos_reg = (plane == 0) ? DSPAPOS : DSPBPOS;
2918 int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC; 2919 int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
2919 int refclk, num_outputs = 0; 2920 int refclk, num_connectors = 0;
2920 intel_clock_t clock, reduced_clock; 2921 intel_clock_t clock, reduced_clock;
2921 u32 dpll = 0, fp = 0, fp2 = 0, dspcntr, pipeconf; 2922 u32 dpll = 0, fp = 0, fp2 = 0, dspcntr, pipeconf;
2922 bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false; 2923 bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false;
@@ -2942,19 +2943,19 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2942 drm_vblank_pre_modeset(dev, pipe); 2943 drm_vblank_pre_modeset(dev, pipe);
2943 2944
2944 list_for_each_entry(connector, &mode_config->connector_list, head) { 2945 list_for_each_entry(connector, &mode_config->connector_list, head) {
2945 struct intel_output *intel_output = to_intel_output(connector); 2946 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
2946 2947
2947 if (!connector->encoder || connector->encoder->crtc != crtc) 2948 if (!connector->encoder || connector->encoder->crtc != crtc)
2948 continue; 2949 continue;
2949 2950
2950 switch (intel_output->type) { 2951 switch (intel_encoder->type) {
2951 case INTEL_OUTPUT_LVDS: 2952 case INTEL_OUTPUT_LVDS:
2952 is_lvds = true; 2953 is_lvds = true;
2953 break; 2954 break;
2954 case INTEL_OUTPUT_SDVO: 2955 case INTEL_OUTPUT_SDVO:
2955 case INTEL_OUTPUT_HDMI: 2956 case INTEL_OUTPUT_HDMI:
2956 is_sdvo = true; 2957 is_sdvo = true;
2957 if (intel_output->needs_tv_clock) 2958 if (intel_encoder->needs_tv_clock)
2958 is_tv = true; 2959 is_tv = true;
2959 break; 2960 break;
2960 case INTEL_OUTPUT_DVO: 2961 case INTEL_OUTPUT_DVO:
@@ -2974,10 +2975,10 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2974 break; 2975 break;
2975 } 2976 }
2976 2977
2977 num_outputs++; 2978 num_connectors++;
2978 } 2979 }
2979 2980
2980 if (is_lvds && dev_priv->lvds_use_ssc && num_outputs < 2) { 2981 if (is_lvds && dev_priv->lvds_use_ssc && num_connectors < 2) {
2981 refclk = dev_priv->lvds_ssc_freq * 1000; 2982 refclk = dev_priv->lvds_ssc_freq * 1000;
2982 DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n", 2983 DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
2983 refclk / 1000); 2984 refclk / 1000);
@@ -3048,8 +3049,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3048 if (is_edp) { 3049 if (is_edp) {
3049 struct drm_connector *edp; 3050 struct drm_connector *edp;
3050 target_clock = mode->clock; 3051 target_clock = mode->clock;
3051 edp = intel_pipe_get_output(crtc); 3052 edp = intel_pipe_get_connector(crtc);
3052 intel_edp_link_config(to_intel_output(edp), 3053 intel_edp_link_config(to_intel_encoder(edp),
3053 &lane, &link_bw); 3054 &lane, &link_bw);
3054 } else { 3055 } else {
3055 /* DP over FDI requires target mode clock 3056 /* DP over FDI requires target mode clock
@@ -3230,7 +3231,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3230 /* XXX: just matching BIOS for now */ 3231 /* XXX: just matching BIOS for now */
3231 /* dpll |= PLL_REF_INPUT_TVCLKINBC; */ 3232 /* dpll |= PLL_REF_INPUT_TVCLKINBC; */
3232 dpll |= 3; 3233 dpll |= 3;
3233 else if (is_lvds && dev_priv->lvds_use_ssc && num_outputs < 2) 3234 else if (is_lvds && dev_priv->lvds_use_ssc && num_connectors < 2)
3234 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 3235 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
3235 else 3236 else
3236 dpll |= PLL_REF_INPUT_DREFCLK; 3237 dpll |= PLL_REF_INPUT_DREFCLK;
@@ -3510,7 +3511,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
3510 if (!bo) 3511 if (!bo)
3511 return -ENOENT; 3512 return -ENOENT;
3512 3513
3513 obj_priv = bo->driver_private; 3514 obj_priv = to_intel_bo(bo);
3514 3515
3515 if (bo->size < width * height * 4) { 3516 if (bo->size < width * height * 4) {
3516 DRM_ERROR("buffer is to small\n"); 3517 DRM_ERROR("buffer is to small\n");
@@ -3654,9 +3655,9 @@ static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
3654 * detection. 3655 * detection.
3655 * 3656 *
3656 * It will be up to the load-detect code to adjust the pipe as appropriate for 3657 * It will be up to the load-detect code to adjust the pipe as appropriate for
3657 * its requirements. The pipe will be connected to no other outputs. 3658 * its requirements. The pipe will be connected to no other encoders.
3658 * 3659 *
3659 * Currently this code will only succeed if there is a pipe with no outputs 3660 * Currently this code will only succeed if there is a pipe with no encoders
3660 * configured for it. In the future, it could choose to temporarily disable 3661 * configured for it. In the future, it could choose to temporarily disable
3661 * some outputs to free up a pipe for its use. 3662 * some outputs to free up a pipe for its use.
3662 * 3663 *
@@ -3669,14 +3670,14 @@ static struct drm_display_mode load_detect_mode = {
3669 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), 3670 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
3670}; 3671};
3671 3672
3672struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output, 3673struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
3673 struct drm_display_mode *mode, 3674 struct drm_display_mode *mode,
3674 int *dpms_mode) 3675 int *dpms_mode)
3675{ 3676{
3676 struct intel_crtc *intel_crtc; 3677 struct intel_crtc *intel_crtc;
3677 struct drm_crtc *possible_crtc; 3678 struct drm_crtc *possible_crtc;
3678 struct drm_crtc *supported_crtc =NULL; 3679 struct drm_crtc *supported_crtc =NULL;
3679 struct drm_encoder *encoder = &intel_output->enc; 3680 struct drm_encoder *encoder = &intel_encoder->enc;
3680 struct drm_crtc *crtc = NULL; 3681 struct drm_crtc *crtc = NULL;
3681 struct drm_device *dev = encoder->dev; 3682 struct drm_device *dev = encoder->dev;
3682 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; 3683 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
@@ -3728,8 +3729,8 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output,
3728 } 3729 }
3729 3730
3730 encoder->crtc = crtc; 3731 encoder->crtc = crtc;
3731 intel_output->base.encoder = encoder; 3732 intel_encoder->base.encoder = encoder;
3732 intel_output->load_detect_temp = true; 3733 intel_encoder->load_detect_temp = true;
3733 3734
3734 intel_crtc = to_intel_crtc(crtc); 3735 intel_crtc = to_intel_crtc(crtc);
3735 *dpms_mode = intel_crtc->dpms_mode; 3736 *dpms_mode = intel_crtc->dpms_mode;
@@ -3754,23 +3755,23 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output,
3754 return crtc; 3755 return crtc;
3755} 3756}
3756 3757
3757void intel_release_load_detect_pipe(struct intel_output *intel_output, int dpms_mode) 3758void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, int dpms_mode)
3758{ 3759{
3759 struct drm_encoder *encoder = &intel_output->enc; 3760 struct drm_encoder *encoder = &intel_encoder->enc;
3760 struct drm_device *dev = encoder->dev; 3761 struct drm_device *dev = encoder->dev;
3761 struct drm_crtc *crtc = encoder->crtc; 3762 struct drm_crtc *crtc = encoder->crtc;
3762 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; 3763 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
3763 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; 3764 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
3764 3765
3765 if (intel_output->load_detect_temp) { 3766 if (intel_encoder->load_detect_temp) {
3766 encoder->crtc = NULL; 3767 encoder->crtc = NULL;
3767 intel_output->base.encoder = NULL; 3768 intel_encoder->base.encoder = NULL;
3768 intel_output->load_detect_temp = false; 3769 intel_encoder->load_detect_temp = false;
3769 crtc->enabled = drm_helper_crtc_in_use(crtc); 3770 crtc->enabled = drm_helper_crtc_in_use(crtc);
3770 drm_helper_disable_unused_functions(dev); 3771 drm_helper_disable_unused_functions(dev);
3771 } 3772 }
3772 3773
3773 /* Switch crtc and output back off if necessary */ 3774 /* Switch crtc and encoder back off if necessary */
3774 if (crtc->enabled && dpms_mode != DRM_MODE_DPMS_ON) { 3775 if (crtc->enabled && dpms_mode != DRM_MODE_DPMS_ON) {
3775 if (encoder->crtc == crtc) 3776 if (encoder->crtc == crtc)
3776 encoder_funcs->dpms(encoder, dpms_mode); 3777 encoder_funcs->dpms(encoder, dpms_mode);
@@ -4155,7 +4156,7 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
4155 work = intel_crtc->unpin_work; 4156 work = intel_crtc->unpin_work;
4156 if (work == NULL || !work->pending) { 4157 if (work == NULL || !work->pending) {
4157 if (work && !work->pending) { 4158 if (work && !work->pending) {
4158 obj_priv = work->pending_flip_obj->driver_private; 4159 obj_priv = to_intel_bo(work->pending_flip_obj);
4159 DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n", 4160 DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n",
4160 obj_priv, 4161 obj_priv,
4161 atomic_read(&obj_priv->pending_flip)); 4162 atomic_read(&obj_priv->pending_flip));
@@ -4180,7 +4181,7 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe)
4180 4181
4181 spin_unlock_irqrestore(&dev->event_lock, flags); 4182 spin_unlock_irqrestore(&dev->event_lock, flags);
4182 4183
4183 obj_priv = work->pending_flip_obj->driver_private; 4184 obj_priv = to_intel_bo(work->pending_flip_obj);
4184 4185
4185 /* Initial scanout buffer will have a 0 pending flip count */ 4186 /* Initial scanout buffer will have a 0 pending flip count */
4186 if ((atomic_read(&obj_priv->pending_flip) == 0) || 4187 if ((atomic_read(&obj_priv->pending_flip) == 0) ||
@@ -4251,7 +4252,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
4251 ret = intel_pin_and_fence_fb_obj(dev, obj); 4252 ret = intel_pin_and_fence_fb_obj(dev, obj);
4252 if (ret != 0) { 4253 if (ret != 0) {
4253 DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n", 4254 DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n",
4254 obj->driver_private); 4255 to_intel_bo(obj));
4255 kfree(work); 4256 kfree(work);
4256 intel_crtc->unpin_work = NULL; 4257 intel_crtc->unpin_work = NULL;
4257 mutex_unlock(&dev->struct_mutex); 4258 mutex_unlock(&dev->struct_mutex);
@@ -4265,7 +4266,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
4265 crtc->fb = fb; 4266 crtc->fb = fb;
4266 i915_gem_object_flush_write_domain(obj); 4267 i915_gem_object_flush_write_domain(obj);
4267 drm_vblank_get(dev, intel_crtc->pipe); 4268 drm_vblank_get(dev, intel_crtc->pipe);
4268 obj_priv = obj->driver_private; 4269 obj_priv = to_intel_bo(obj);
4269 atomic_inc(&obj_priv->pending_flip); 4270 atomic_inc(&obj_priv->pending_flip);
4270 work->pending_flip_obj = obj; 4271 work->pending_flip_obj = obj;
4271 4272
@@ -4398,8 +4399,8 @@ static int intel_connector_clones(struct drm_device *dev, int type_mask)
4398 int entry = 0; 4399 int entry = 0;
4399 4400
4400 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 4401 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
4401 struct intel_output *intel_output = to_intel_output(connector); 4402 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
4402 if (type_mask & intel_output->clone_mask) 4403 if (type_mask & intel_encoder->clone_mask)
4403 index_mask |= (1 << entry); 4404 index_mask |= (1 << entry);
4404 entry++; 4405 entry++;
4405 } 4406 }
@@ -4494,12 +4495,12 @@ static void intel_setup_outputs(struct drm_device *dev)
4494 intel_tv_init(dev); 4495 intel_tv_init(dev);
4495 4496
4496 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 4497 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
4497 struct intel_output *intel_output = to_intel_output(connector); 4498 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
4498 struct drm_encoder *encoder = &intel_output->enc; 4499 struct drm_encoder *encoder = &intel_encoder->enc;
4499 4500
4500 encoder->possible_crtcs = intel_output->crtc_mask; 4501 encoder->possible_crtcs = intel_encoder->crtc_mask;
4501 encoder->possible_clones = intel_connector_clones(dev, 4502 encoder->possible_clones = intel_connector_clones(dev,
4502 intel_output->clone_mask); 4503 intel_encoder->clone_mask);
4503 } 4504 }
4504} 4505}
4505 4506
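The intel_connector_clones() hunk above walks the connector list and turns a type bitmask into a bitmask of connector indices, which intel_setup_outputs() then hands to each encoder as possible_clones. Below is a minimal userspace sketch of that translation; the clone_mask values are made up purely for illustration and are not the driver's real bits.

#include <stdio.h>

/* Stand-in connector list: each entry's clone_mask says which output types
 * it may share a pipe with (hypothetical values, for illustration only). */
static const int clone_masks[] = { 0x1, 0x2, 0x2, 0x4 };

/* Same shape as intel_connector_clones(): turn "clones type X" into a
 * bitmask of connector indices, later stored as encoder->possible_clones. */
static int connector_clones(int type_mask)
{
        int index_mask = 0;

        for (int entry = 0; entry < 4; entry++)
                if (type_mask & clone_masks[entry])
                        index_mask |= 1 << entry;
        return index_mask;
}

int main(void)
{
        printf("clones of type 0x2 -> index mask 0x%x\n", connector_clones(0x2));
        return 0;
}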
@@ -4778,14 +4779,14 @@ void intel_init_clock_gating(struct drm_device *dev)
4778 struct drm_i915_gem_object *obj_priv = NULL; 4779 struct drm_i915_gem_object *obj_priv = NULL;
4779 4780
4780 if (dev_priv->pwrctx) { 4781 if (dev_priv->pwrctx) {
4781 obj_priv = dev_priv->pwrctx->driver_private; 4782 obj_priv = to_intel_bo(dev_priv->pwrctx);
4782 } else { 4783 } else {
4783 struct drm_gem_object *pwrctx; 4784 struct drm_gem_object *pwrctx;
4784 4785
4785 pwrctx = intel_alloc_power_context(dev); 4786 pwrctx = intel_alloc_power_context(dev);
4786 if (pwrctx) { 4787 if (pwrctx) {
4787 dev_priv->pwrctx = pwrctx; 4788 dev_priv->pwrctx = pwrctx;
4788 obj_priv = pwrctx->driver_private; 4789 obj_priv = to_intel_bo(pwrctx);
4789 } 4790 }
4790 } 4791 }
4791 4792
@@ -4814,7 +4815,7 @@ static void intel_init_display(struct drm_device *dev)
4814 dev_priv->display.fbc_enabled = g4x_fbc_enabled; 4815 dev_priv->display.fbc_enabled = g4x_fbc_enabled;
4815 dev_priv->display.enable_fbc = g4x_enable_fbc; 4816 dev_priv->display.enable_fbc = g4x_enable_fbc;
4816 dev_priv->display.disable_fbc = g4x_disable_fbc; 4817 dev_priv->display.disable_fbc = g4x_disable_fbc;
4817 } else if (IS_I965GM(dev) || IS_I945GM(dev) || IS_I915GM(dev)) { 4818 } else if (IS_I965GM(dev)) {
4818 dev_priv->display.fbc_enabled = i8xx_fbc_enabled; 4819 dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
4819 dev_priv->display.enable_fbc = i8xx_enable_fbc; 4820 dev_priv->display.enable_fbc = i8xx_enable_fbc;
4820 dev_priv->display.disable_fbc = i8xx_disable_fbc; 4821 dev_priv->display.disable_fbc = i8xx_disable_fbc;
@@ -4852,17 +4853,18 @@ static void intel_init_display(struct drm_device *dev)
4852 dev_priv->display.update_wm = g4x_update_wm; 4853 dev_priv->display.update_wm = g4x_update_wm;
4853 else if (IS_I965G(dev)) 4854 else if (IS_I965G(dev))
4854 dev_priv->display.update_wm = i965_update_wm; 4855 dev_priv->display.update_wm = i965_update_wm;
4855 else if (IS_I9XX(dev) || IS_MOBILE(dev)) { 4856 else if (IS_I9XX(dev)) {
4856 dev_priv->display.update_wm = i9xx_update_wm; 4857 dev_priv->display.update_wm = i9xx_update_wm;
4857 dev_priv->display.get_fifo_size = i9xx_get_fifo_size; 4858 dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
4859 } else if (IS_I85X(dev)) {
4860 dev_priv->display.update_wm = i9xx_update_wm;
4861 dev_priv->display.get_fifo_size = i85x_get_fifo_size;
4858 } else { 4862 } else {
4859 if (IS_I85X(dev)) 4863 dev_priv->display.update_wm = i830_update_wm;
4860 dev_priv->display.get_fifo_size = i85x_get_fifo_size; 4864 if (IS_845G(dev))
4861 else if (IS_845G(dev))
4862 dev_priv->display.get_fifo_size = i845_get_fifo_size; 4865 dev_priv->display.get_fifo_size = i845_get_fifo_size;
4863 else 4866 else
4864 dev_priv->display.get_fifo_size = i830_get_fifo_size; 4867 dev_priv->display.get_fifo_size = i830_get_fifo_size;
4865 dev_priv->display.update_wm = i830_update_wm;
4866 } 4868 }
4867} 4869}
4868 4870
@@ -4956,7 +4958,7 @@ void intel_modeset_cleanup(struct drm_device *dev)
4956 if (dev_priv->pwrctx) { 4958 if (dev_priv->pwrctx) {
4957 struct drm_i915_gem_object *obj_priv; 4959 struct drm_i915_gem_object *obj_priv;
4958 4960
4959 obj_priv = dev_priv->pwrctx->driver_private; 4961 obj_priv = to_intel_bo(dev_priv->pwrctx);
4960 I915_WRITE(PWRCTXA, obj_priv->gtt_offset &~ PWRCTX_EN); 4962 I915_WRITE(PWRCTXA, obj_priv->gtt_offset &~ PWRCTX_EN);
4961 I915_READ(PWRCTXA); 4963 I915_READ(PWRCTXA);
4962 i915_gem_object_unpin(dev_priv->pwrctx); 4964 i915_gem_object_unpin(dev_priv->pwrctx);
@@ -4977,9 +4979,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
4977*/ 4979*/
4978struct drm_encoder *intel_best_encoder(struct drm_connector *connector) 4980struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
4979{ 4981{
4980 struct intel_output *intel_output = to_intel_output(connector); 4982 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
4981 4983
4982 return &intel_output->enc; 4984 return &intel_encoder->enc;
4983} 4985}
4984 4986
4985/* 4987/*
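Several hunks above swap raw obj->driver_private dereferences for the to_intel_bo() helper. The helper itself lives in i915_drv.h (also touched by this commit, but not shown in this excerpt), so the definition in the sketch below is an assumption about its shape — a thin typed accessor over the GEM object's private data — rather than a copy of the real macro; the struct names are cut-down stand-ins.

/* Hypothetical, userspace-compilable sketch of the "typed accessor" pattern. */
#include <assert.h>
#include <stddef.h>

struct drm_gem_object { void *driver_private; };
struct drm_i915_gem_object { struct drm_gem_object *obj; int pending_flip; };

/* A typed helper centralizes the lookup so callers never touch the void *.
 * Assumption: the real macro in i915_drv.h may be a cast or container_of. */
static inline struct drm_i915_gem_object *to_intel_bo(struct drm_gem_object *obj)
{
        return obj->driver_private;
}

int main(void)
{
        struct drm_gem_object gem = { 0 };
        struct drm_i915_gem_object bo = { .obj = &gem, .pending_flip = 0 };

        gem.driver_private = &bo;
        assert(to_intel_bo(&gem) == &bo);  /* same object, now concretely typed */
        return 0;
}

Callers such as intel_finish_page_flip() then read fields like pending_flip through a properly typed pointer instead of casting a void * at every use.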
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 3ef3a0d0edd0..77e40cfcf216 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -26,6 +26,7 @@
26 */ 26 */
27 27
28#include <linux/i2c.h> 28#include <linux/i2c.h>
29#include <linux/slab.h>
29#include "drmP.h" 30#include "drmP.h"
30#include "drm.h" 31#include "drm.h"
31#include "drm_crtc.h" 32#include "drm_crtc.h"
@@ -54,23 +55,23 @@ struct intel_dp_priv {
54 uint8_t link_bw; 55 uint8_t link_bw;
55 uint8_t lane_count; 56 uint8_t lane_count;
56 uint8_t dpcd[4]; 57 uint8_t dpcd[4];
57 struct intel_output *intel_output; 58 struct intel_encoder *intel_encoder;
58 struct i2c_adapter adapter; 59 struct i2c_adapter adapter;
59 struct i2c_algo_dp_aux_data algo; 60 struct i2c_algo_dp_aux_data algo;
60}; 61};
61 62
62static void 63static void
63intel_dp_link_train(struct intel_output *intel_output, uint32_t DP, 64intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
64 uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]); 65 uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]);
65 66
66static void 67static void
67intel_dp_link_down(struct intel_output *intel_output, uint32_t DP); 68intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP);
68 69
69void 70void
70intel_edp_link_config (struct intel_output *intel_output, 71intel_edp_link_config (struct intel_encoder *intel_encoder,
71 int *lane_num, int *link_bw) 72 int *lane_num, int *link_bw)
72{ 73{
73 struct intel_dp_priv *dp_priv = intel_output->dev_priv; 74 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
74 75
75 *lane_num = dp_priv->lane_count; 76 *lane_num = dp_priv->lane_count;
76 if (dp_priv->link_bw == DP_LINK_BW_1_62) 77 if (dp_priv->link_bw == DP_LINK_BW_1_62)
@@ -80,9 +81,9 @@ intel_edp_link_config (struct intel_output *intel_output,
80} 81}
81 82
82static int 83static int
83intel_dp_max_lane_count(struct intel_output *intel_output) 84intel_dp_max_lane_count(struct intel_encoder *intel_encoder)
84{ 85{
85 struct intel_dp_priv *dp_priv = intel_output->dev_priv; 86 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
86 int max_lane_count = 4; 87 int max_lane_count = 4;
87 88
88 if (dp_priv->dpcd[0] >= 0x11) { 89 if (dp_priv->dpcd[0] >= 0x11) {
@@ -98,9 +99,9 @@ intel_dp_max_lane_count(struct intel_output *intel_output)
98} 99}
99 100
100static int 101static int
101intel_dp_max_link_bw(struct intel_output *intel_output) 102intel_dp_max_link_bw(struct intel_encoder *intel_encoder)
102{ 103{
103 struct intel_dp_priv *dp_priv = intel_output->dev_priv; 104 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
104 int max_link_bw = dp_priv->dpcd[1]; 105 int max_link_bw = dp_priv->dpcd[1];
105 106
106 switch (max_link_bw) { 107 switch (max_link_bw) {
@@ -126,11 +127,11 @@ intel_dp_link_clock(uint8_t link_bw)
126/* I think this is a fiction */ 127/* I think this is a fiction */
127static int 128static int
128intel_dp_link_required(struct drm_device *dev, 129intel_dp_link_required(struct drm_device *dev,
129 struct intel_output *intel_output, int pixel_clock) 130 struct intel_encoder *intel_encoder, int pixel_clock)
130{ 131{
131 struct drm_i915_private *dev_priv = dev->dev_private; 132 struct drm_i915_private *dev_priv = dev->dev_private;
132 133
133 if (IS_eDP(intel_output)) 134 if (IS_eDP(intel_encoder))
134 return (pixel_clock * dev_priv->edp_bpp) / 8; 135 return (pixel_clock * dev_priv->edp_bpp) / 8;
135 else 136 else
136 return pixel_clock * 3; 137 return pixel_clock * 3;
@@ -140,11 +141,11 @@ static int
140intel_dp_mode_valid(struct drm_connector *connector, 141intel_dp_mode_valid(struct drm_connector *connector,
141 struct drm_display_mode *mode) 142 struct drm_display_mode *mode)
142{ 143{
143 struct intel_output *intel_output = to_intel_output(connector); 144 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
144 int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_output)); 145 int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_encoder));
145 int max_lanes = intel_dp_max_lane_count(intel_output); 146 int max_lanes = intel_dp_max_lane_count(intel_encoder);
146 147
147 if (intel_dp_link_required(connector->dev, intel_output, mode->clock) 148 if (intel_dp_link_required(connector->dev, intel_encoder, mode->clock)
148 > max_link_clock * max_lanes) 149 > max_link_clock * max_lanes)
149 return MODE_CLOCK_HIGH; 150 return MODE_CLOCK_HIGH;
150 151
@@ -208,13 +209,13 @@ intel_hrawclk(struct drm_device *dev)
208} 209}
209 210
210static int 211static int
211intel_dp_aux_ch(struct intel_output *intel_output, 212intel_dp_aux_ch(struct intel_encoder *intel_encoder,
212 uint8_t *send, int send_bytes, 213 uint8_t *send, int send_bytes,
213 uint8_t *recv, int recv_size) 214 uint8_t *recv, int recv_size)
214{ 215{
215 struct intel_dp_priv *dp_priv = intel_output->dev_priv; 216 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
216 uint32_t output_reg = dp_priv->output_reg; 217 uint32_t output_reg = dp_priv->output_reg;
217 struct drm_device *dev = intel_output->base.dev; 218 struct drm_device *dev = intel_encoder->base.dev;
218 struct drm_i915_private *dev_priv = dev->dev_private; 219 struct drm_i915_private *dev_priv = dev->dev_private;
219 uint32_t ch_ctl = output_reg + 0x10; 220 uint32_t ch_ctl = output_reg + 0x10;
220 uint32_t ch_data = ch_ctl + 4; 221 uint32_t ch_data = ch_ctl + 4;
@@ -229,7 +230,7 @@ intel_dp_aux_ch(struct intel_output *intel_output,
229 * and would like to run at 2MHz. So, take the 230 * and would like to run at 2MHz. So, take the
230 * hrawclk value and divide by 2 and use that 231 * hrawclk value and divide by 2 and use that
231 */ 232 */
232 if (IS_eDP(intel_output)) 233 if (IS_eDP(intel_encoder))
233 aux_clock_divider = 225; /* eDP input clock at 450Mhz */ 234 aux_clock_divider = 225; /* eDP input clock at 450Mhz */
234 else if (HAS_PCH_SPLIT(dev)) 235 else if (HAS_PCH_SPLIT(dev))
235 aux_clock_divider = 62; /* IRL input clock fixed at 125Mhz */ 236 aux_clock_divider = 62; /* IRL input clock fixed at 125Mhz */
@@ -312,7 +313,7 @@ intel_dp_aux_ch(struct intel_output *intel_output,
312 313
313/* Write data to the aux channel in native mode */ 314/* Write data to the aux channel in native mode */
314static int 315static int
315intel_dp_aux_native_write(struct intel_output *intel_output, 316intel_dp_aux_native_write(struct intel_encoder *intel_encoder,
316 uint16_t address, uint8_t *send, int send_bytes) 317 uint16_t address, uint8_t *send, int send_bytes)
317{ 318{
318 int ret; 319 int ret;
@@ -329,7 +330,7 @@ intel_dp_aux_native_write(struct intel_output *intel_output,
329 memcpy(&msg[4], send, send_bytes); 330 memcpy(&msg[4], send, send_bytes);
330 msg_bytes = send_bytes + 4; 331 msg_bytes = send_bytes + 4;
331 for (;;) { 332 for (;;) {
332 ret = intel_dp_aux_ch(intel_output, msg, msg_bytes, &ack, 1); 333 ret = intel_dp_aux_ch(intel_encoder, msg, msg_bytes, &ack, 1);
333 if (ret < 0) 334 if (ret < 0)
334 return ret; 335 return ret;
335 if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) 336 if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
@@ -344,15 +345,15 @@ intel_dp_aux_native_write(struct intel_output *intel_output,
344 345
345/* Write a single byte to the aux channel in native mode */ 346/* Write a single byte to the aux channel in native mode */
346static int 347static int
347intel_dp_aux_native_write_1(struct intel_output *intel_output, 348intel_dp_aux_native_write_1(struct intel_encoder *intel_encoder,
348 uint16_t address, uint8_t byte) 349 uint16_t address, uint8_t byte)
349{ 350{
350 return intel_dp_aux_native_write(intel_output, address, &byte, 1); 351 return intel_dp_aux_native_write(intel_encoder, address, &byte, 1);
351} 352}
352 353
353/* read bytes from a native aux channel */ 354/* read bytes from a native aux channel */
354static int 355static int
355intel_dp_aux_native_read(struct intel_output *intel_output, 356intel_dp_aux_native_read(struct intel_encoder *intel_encoder,
356 uint16_t address, uint8_t *recv, int recv_bytes) 357 uint16_t address, uint8_t *recv, int recv_bytes)
357{ 358{
358 uint8_t msg[4]; 359 uint8_t msg[4];
@@ -371,7 +372,7 @@ intel_dp_aux_native_read(struct intel_output *intel_output,
371 reply_bytes = recv_bytes + 1; 372 reply_bytes = recv_bytes + 1;
372 373
373 for (;;) { 374 for (;;) {
374 ret = intel_dp_aux_ch(intel_output, msg, msg_bytes, 375 ret = intel_dp_aux_ch(intel_encoder, msg, msg_bytes,
375 reply, reply_bytes); 376 reply, reply_bytes);
376 if (ret == 0) 377 if (ret == 0)
377 return -EPROTO; 378 return -EPROTO;
@@ -397,7 +398,7 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
397 struct intel_dp_priv *dp_priv = container_of(adapter, 398 struct intel_dp_priv *dp_priv = container_of(adapter,
398 struct intel_dp_priv, 399 struct intel_dp_priv,
399 adapter); 400 adapter);
400 struct intel_output *intel_output = dp_priv->intel_output; 401 struct intel_encoder *intel_encoder = dp_priv->intel_encoder;
401 uint16_t address = algo_data->address; 402 uint16_t address = algo_data->address;
402 uint8_t msg[5]; 403 uint8_t msg[5];
403 uint8_t reply[2]; 404 uint8_t reply[2];
@@ -436,7 +437,7 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
436 } 437 }
437 438
438 for (;;) { 439 for (;;) {
439 ret = intel_dp_aux_ch(intel_output, 440 ret = intel_dp_aux_ch(intel_encoder,
440 msg, msg_bytes, 441 msg, msg_bytes,
441 reply, reply_bytes); 442 reply, reply_bytes);
442 if (ret < 0) { 443 if (ret < 0) {
@@ -464,9 +465,9 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
464} 465}
465 466
466static int 467static int
467intel_dp_i2c_init(struct intel_output *intel_output, const char *name) 468intel_dp_i2c_init(struct intel_encoder *intel_encoder, const char *name)
468{ 469{
469 struct intel_dp_priv *dp_priv = intel_output->dev_priv; 470 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
470 471
471 DRM_DEBUG_KMS("i2c_init %s\n", name); 472 DRM_DEBUG_KMS("i2c_init %s\n", name);
472 dp_priv->algo.running = false; 473 dp_priv->algo.running = false;
@@ -479,7 +480,7 @@ intel_dp_i2c_init(struct intel_output *intel_output, const char *name)
479 strncpy (dp_priv->adapter.name, name, sizeof(dp_priv->adapter.name) - 1); 480 strncpy (dp_priv->adapter.name, name, sizeof(dp_priv->adapter.name) - 1);
480 dp_priv->adapter.name[sizeof(dp_priv->adapter.name) - 1] = '\0'; 481 dp_priv->adapter.name[sizeof(dp_priv->adapter.name) - 1] = '\0';
481 dp_priv->adapter.algo_data = &dp_priv->algo; 482 dp_priv->adapter.algo_data = &dp_priv->algo;
482 dp_priv->adapter.dev.parent = &intel_output->base.kdev; 483 dp_priv->adapter.dev.parent = &intel_encoder->base.kdev;
483 484
484 return i2c_dp_aux_add_bus(&dp_priv->adapter); 485 return i2c_dp_aux_add_bus(&dp_priv->adapter);
485} 486}
@@ -488,18 +489,18 @@ static bool
488intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, 489intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
489 struct drm_display_mode *adjusted_mode) 490 struct drm_display_mode *adjusted_mode)
490{ 491{
491 struct intel_output *intel_output = enc_to_intel_output(encoder); 492 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
492 struct intel_dp_priv *dp_priv = intel_output->dev_priv; 493 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
493 int lane_count, clock; 494 int lane_count, clock;
494 int max_lane_count = intel_dp_max_lane_count(intel_output); 495 int max_lane_count = intel_dp_max_lane_count(intel_encoder);
495 int max_clock = intel_dp_max_link_bw(intel_output) == DP_LINK_BW_2_7 ? 1 : 0; 496 int max_clock = intel_dp_max_link_bw(intel_encoder) == DP_LINK_BW_2_7 ? 1 : 0;
496 static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; 497 static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
497 498
498 for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { 499 for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
499 for (clock = 0; clock <= max_clock; clock++) { 500 for (clock = 0; clock <= max_clock; clock++) {
500 int link_avail = intel_dp_link_clock(bws[clock]) * lane_count; 501 int link_avail = intel_dp_link_clock(bws[clock]) * lane_count;
501 502
502 if (intel_dp_link_required(encoder->dev, intel_output, mode->clock) 503 if (intel_dp_link_required(encoder->dev, intel_encoder, mode->clock)
503 <= link_avail) { 504 <= link_avail) {
504 dp_priv->link_bw = bws[clock]; 505 dp_priv->link_bw = bws[clock];
505 dp_priv->lane_count = lane_count; 506 dp_priv->lane_count = lane_count;
@@ -561,16 +562,16 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
561 struct intel_dp_m_n m_n; 562 struct intel_dp_m_n m_n;
562 563
563 /* 564 /*
564 * Find the lane count in the intel_output private 565 * Find the lane count in the intel_encoder private
565 */ 566 */
566 list_for_each_entry(connector, &mode_config->connector_list, head) { 567 list_for_each_entry(connector, &mode_config->connector_list, head) {
567 struct intel_output *intel_output = to_intel_output(connector); 568 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
568 struct intel_dp_priv *dp_priv = intel_output->dev_priv; 569 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
569 570
570 if (!connector->encoder || connector->encoder->crtc != crtc) 571 if (!connector->encoder || connector->encoder->crtc != crtc)
571 continue; 572 continue;
572 573
573 if (intel_output->type == INTEL_OUTPUT_DISPLAYPORT) { 574 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
574 lane_count = dp_priv->lane_count; 575 lane_count = dp_priv->lane_count;
575 break; 576 break;
576 } 577 }
@@ -625,9 +626,9 @@ static void
625intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, 626intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
626 struct drm_display_mode *adjusted_mode) 627 struct drm_display_mode *adjusted_mode)
627{ 628{
628 struct intel_output *intel_output = enc_to_intel_output(encoder); 629 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
629 struct intel_dp_priv *dp_priv = intel_output->dev_priv; 630 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
630 struct drm_crtc *crtc = intel_output->enc.crtc; 631 struct drm_crtc *crtc = intel_encoder->enc.crtc;
631 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 632 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
632 633
633 dp_priv->DP = (DP_LINK_TRAIN_OFF | 634 dp_priv->DP = (DP_LINK_TRAIN_OFF |
@@ -666,7 +667,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
666 if (intel_crtc->pipe == 1) 667 if (intel_crtc->pipe == 1)
667 dp_priv->DP |= DP_PIPEB_SELECT; 668 dp_priv->DP |= DP_PIPEB_SELECT;
668 669
669 if (IS_eDP(intel_output)) { 670 if (IS_eDP(intel_encoder)) {
670 /* don't miss out required setting for eDP */ 671 /* don't miss out required setting for eDP */
671 dp_priv->DP |= DP_PLL_ENABLE; 672 dp_priv->DP |= DP_PLL_ENABLE;
672 if (adjusted_mode->clock < 200000) 673 if (adjusted_mode->clock < 200000)
@@ -701,22 +702,22 @@ static void ironlake_edp_backlight_off (struct drm_device *dev)
701static void 702static void
702intel_dp_dpms(struct drm_encoder *encoder, int mode) 703intel_dp_dpms(struct drm_encoder *encoder, int mode)
703{ 704{
704 struct intel_output *intel_output = enc_to_intel_output(encoder); 705 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
705 struct intel_dp_priv *dp_priv = intel_output->dev_priv; 706 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
706 struct drm_device *dev = intel_output->base.dev; 707 struct drm_device *dev = intel_encoder->base.dev;
707 struct drm_i915_private *dev_priv = dev->dev_private; 708 struct drm_i915_private *dev_priv = dev->dev_private;
708 uint32_t dp_reg = I915_READ(dp_priv->output_reg); 709 uint32_t dp_reg = I915_READ(dp_priv->output_reg);
709 710
710 if (mode != DRM_MODE_DPMS_ON) { 711 if (mode != DRM_MODE_DPMS_ON) {
711 if (dp_reg & DP_PORT_EN) { 712 if (dp_reg & DP_PORT_EN) {
712 intel_dp_link_down(intel_output, dp_priv->DP); 713 intel_dp_link_down(intel_encoder, dp_priv->DP);
713 if (IS_eDP(intel_output)) 714 if (IS_eDP(intel_encoder))
714 ironlake_edp_backlight_off(dev); 715 ironlake_edp_backlight_off(dev);
715 } 716 }
716 } else { 717 } else {
717 if (!(dp_reg & DP_PORT_EN)) { 718 if (!(dp_reg & DP_PORT_EN)) {
718 intel_dp_link_train(intel_output, dp_priv->DP, dp_priv->link_configuration); 719 intel_dp_link_train(intel_encoder, dp_priv->DP, dp_priv->link_configuration);
719 if (IS_eDP(intel_output)) 720 if (IS_eDP(intel_encoder))
720 ironlake_edp_backlight_on(dev); 721 ironlake_edp_backlight_on(dev);
721 } 722 }
722 } 723 }
@@ -728,12 +729,12 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
728 * link status information 729 * link status information
729 */ 730 */
730static bool 731static bool
731intel_dp_get_link_status(struct intel_output *intel_output, 732intel_dp_get_link_status(struct intel_encoder *intel_encoder,
732 uint8_t link_status[DP_LINK_STATUS_SIZE]) 733 uint8_t link_status[DP_LINK_STATUS_SIZE])
733{ 734{
734 int ret; 735 int ret;
735 736
736 ret = intel_dp_aux_native_read(intel_output, 737 ret = intel_dp_aux_native_read(intel_encoder,
737 DP_LANE0_1_STATUS, 738 DP_LANE0_1_STATUS,
738 link_status, DP_LINK_STATUS_SIZE); 739 link_status, DP_LINK_STATUS_SIZE);
739 if (ret != DP_LINK_STATUS_SIZE) 740 if (ret != DP_LINK_STATUS_SIZE)
@@ -751,13 +752,13 @@ intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
751static void 752static void
752intel_dp_save(struct drm_connector *connector) 753intel_dp_save(struct drm_connector *connector)
753{ 754{
754 struct intel_output *intel_output = to_intel_output(connector); 755 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
755 struct drm_device *dev = intel_output->base.dev; 756 struct drm_device *dev = intel_encoder->base.dev;
756 struct drm_i915_private *dev_priv = dev->dev_private; 757 struct drm_i915_private *dev_priv = dev->dev_private;
757 struct intel_dp_priv *dp_priv = intel_output->dev_priv; 758 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
758 759
759 dp_priv->save_DP = I915_READ(dp_priv->output_reg); 760 dp_priv->save_DP = I915_READ(dp_priv->output_reg);
760 intel_dp_aux_native_read(intel_output, DP_LINK_BW_SET, 761 intel_dp_aux_native_read(intel_encoder, DP_LINK_BW_SET,
761 dp_priv->save_link_configuration, 762 dp_priv->save_link_configuration,
762 sizeof (dp_priv->save_link_configuration)); 763 sizeof (dp_priv->save_link_configuration));
763} 764}
@@ -824,7 +825,7 @@ intel_dp_pre_emphasis_max(uint8_t voltage_swing)
824} 825}
825 826
826static void 827static void
827intel_get_adjust_train(struct intel_output *intel_output, 828intel_get_adjust_train(struct intel_encoder *intel_encoder,
828 uint8_t link_status[DP_LINK_STATUS_SIZE], 829 uint8_t link_status[DP_LINK_STATUS_SIZE],
829 int lane_count, 830 int lane_count,
830 uint8_t train_set[4]) 831 uint8_t train_set[4])
@@ -941,15 +942,15 @@ intel_channel_eq_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
941} 942}
942 943
943static bool 944static bool
944intel_dp_set_link_train(struct intel_output *intel_output, 945intel_dp_set_link_train(struct intel_encoder *intel_encoder,
945 uint32_t dp_reg_value, 946 uint32_t dp_reg_value,
946 uint8_t dp_train_pat, 947 uint8_t dp_train_pat,
947 uint8_t train_set[4], 948 uint8_t train_set[4],
948 bool first) 949 bool first)
949{ 950{
950 struct drm_device *dev = intel_output->base.dev; 951 struct drm_device *dev = intel_encoder->base.dev;
951 struct drm_i915_private *dev_priv = dev->dev_private; 952 struct drm_i915_private *dev_priv = dev->dev_private;
952 struct intel_dp_priv *dp_priv = intel_output->dev_priv; 953 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
953 int ret; 954 int ret;
954 955
955 I915_WRITE(dp_priv->output_reg, dp_reg_value); 956 I915_WRITE(dp_priv->output_reg, dp_reg_value);
@@ -957,11 +958,11 @@ intel_dp_set_link_train(struct intel_output *intel_output,
957 if (first) 958 if (first)
958 intel_wait_for_vblank(dev); 959 intel_wait_for_vblank(dev);
959 960
960 intel_dp_aux_native_write_1(intel_output, 961 intel_dp_aux_native_write_1(intel_encoder,
961 DP_TRAINING_PATTERN_SET, 962 DP_TRAINING_PATTERN_SET,
962 dp_train_pat); 963 dp_train_pat);
963 964
964 ret = intel_dp_aux_native_write(intel_output, 965 ret = intel_dp_aux_native_write(intel_encoder,
965 DP_TRAINING_LANE0_SET, train_set, 4); 966 DP_TRAINING_LANE0_SET, train_set, 4);
966 if (ret != 4) 967 if (ret != 4)
967 return false; 968 return false;
@@ -970,12 +971,12 @@ intel_dp_set_link_train(struct intel_output *intel_output,
970} 971}
971 972
972static void 973static void
973intel_dp_link_train(struct intel_output *intel_output, uint32_t DP, 974intel_dp_link_train(struct intel_encoder *intel_encoder, uint32_t DP,
974 uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]) 975 uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE])
975{ 976{
976 struct drm_device *dev = intel_output->base.dev; 977 struct drm_device *dev = intel_encoder->base.dev;
977 struct drm_i915_private *dev_priv = dev->dev_private; 978 struct drm_i915_private *dev_priv = dev->dev_private;
978 struct intel_dp_priv *dp_priv = intel_output->dev_priv; 979 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
979 uint8_t train_set[4]; 980 uint8_t train_set[4];
980 uint8_t link_status[DP_LINK_STATUS_SIZE]; 981 uint8_t link_status[DP_LINK_STATUS_SIZE];
981 int i; 982 int i;
@@ -986,7 +987,7 @@ intel_dp_link_train(struct intel_output *intel_output, uint32_t DP,
986 int tries; 987 int tries;
987 988
988 /* Write the link configuration data */ 989 /* Write the link configuration data */
989 intel_dp_aux_native_write(intel_output, 0x100, 990 intel_dp_aux_native_write(intel_encoder, 0x100,
990 link_configuration, DP_LINK_CONFIGURATION_SIZE); 991 link_configuration, DP_LINK_CONFIGURATION_SIZE);
991 992
992 DP |= DP_PORT_EN; 993 DP |= DP_PORT_EN;
@@ -1000,14 +1001,14 @@ intel_dp_link_train(struct intel_output *intel_output, uint32_t DP,
1000 uint32_t signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count); 1001 uint32_t signal_levels = intel_dp_signal_levels(train_set[0], dp_priv->lane_count);
1001 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; 1002 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
1002 1003
1003 if (!intel_dp_set_link_train(intel_output, DP | DP_LINK_TRAIN_PAT_1, 1004 if (!intel_dp_set_link_train(intel_encoder, DP | DP_LINK_TRAIN_PAT_1,
1004 DP_TRAINING_PATTERN_1, train_set, first)) 1005 DP_TRAINING_PATTERN_1, train_set, first))
1005 break; 1006 break;
1006 first = false; 1007 first = false;
1007 /* Set training pattern 1 */ 1008 /* Set training pattern 1 */
1008 1009
1009 udelay(100); 1010 udelay(100);
1010 if (!intel_dp_get_link_status(intel_output, link_status)) 1011 if (!intel_dp_get_link_status(intel_encoder, link_status))
1011 break; 1012 break;
1012 1013
1013 if (intel_clock_recovery_ok(link_status, dp_priv->lane_count)) { 1014 if (intel_clock_recovery_ok(link_status, dp_priv->lane_count)) {
@@ -1032,7 +1033,7 @@ intel_dp_link_train(struct intel_output *intel_output, uint32_t DP,
1032 voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; 1033 voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
1033 1034
1034 /* Compute new train_set as requested by target */ 1035 /* Compute new train_set as requested by target */
1035 intel_get_adjust_train(intel_output, link_status, dp_priv->lane_count, train_set); 1036 intel_get_adjust_train(intel_encoder, link_status, dp_priv->lane_count, train_set);
1036 } 1037 }
1037 1038
1038 /* channel equalization */ 1039 /* channel equalization */
@@ -1044,13 +1045,13 @@ intel_dp_link_train(struct intel_output *intel_output, uint32_t DP,
1044 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; 1045 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
1045 1046
1046 /* channel eq pattern */ 1047 /* channel eq pattern */
1047 if (!intel_dp_set_link_train(intel_output, DP | DP_LINK_TRAIN_PAT_2, 1048 if (!intel_dp_set_link_train(intel_encoder, DP | DP_LINK_TRAIN_PAT_2,
1048 DP_TRAINING_PATTERN_2, train_set, 1049 DP_TRAINING_PATTERN_2, train_set,
1049 false)) 1050 false))
1050 break; 1051 break;
1051 1052
1052 udelay(400); 1053 udelay(400);
1053 if (!intel_dp_get_link_status(intel_output, link_status)) 1054 if (!intel_dp_get_link_status(intel_encoder, link_status))
1054 break; 1055 break;
1055 1056
1056 if (intel_channel_eq_ok(link_status, dp_priv->lane_count)) { 1057 if (intel_channel_eq_ok(link_status, dp_priv->lane_count)) {
@@ -1063,26 +1064,26 @@ intel_dp_link_train(struct intel_output *intel_output, uint32_t DP,
1063 break; 1064 break;
1064 1065
1065 /* Compute new train_set as requested by target */ 1066 /* Compute new train_set as requested by target */
1066 intel_get_adjust_train(intel_output, link_status, dp_priv->lane_count, train_set); 1067 intel_get_adjust_train(intel_encoder, link_status, dp_priv->lane_count, train_set);
1067 ++tries; 1068 ++tries;
1068 } 1069 }
1069 1070
1070 I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_OFF); 1071 I915_WRITE(dp_priv->output_reg, DP | DP_LINK_TRAIN_OFF);
1071 POSTING_READ(dp_priv->output_reg); 1072 POSTING_READ(dp_priv->output_reg);
1072 intel_dp_aux_native_write_1(intel_output, 1073 intel_dp_aux_native_write_1(intel_encoder,
1073 DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE); 1074 DP_TRAINING_PATTERN_SET, DP_TRAINING_PATTERN_DISABLE);
1074} 1075}
1075 1076
1076static void 1077static void
1077intel_dp_link_down(struct intel_output *intel_output, uint32_t DP) 1078intel_dp_link_down(struct intel_encoder *intel_encoder, uint32_t DP)
1078{ 1079{
1079 struct drm_device *dev = intel_output->base.dev; 1080 struct drm_device *dev = intel_encoder->base.dev;
1080 struct drm_i915_private *dev_priv = dev->dev_private; 1081 struct drm_i915_private *dev_priv = dev->dev_private;
1081 struct intel_dp_priv *dp_priv = intel_output->dev_priv; 1082 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
1082 1083
1083 DRM_DEBUG_KMS("\n"); 1084 DRM_DEBUG_KMS("\n");
1084 1085
1085 if (IS_eDP(intel_output)) { 1086 if (IS_eDP(intel_encoder)) {
1086 DP &= ~DP_PLL_ENABLE; 1087 DP &= ~DP_PLL_ENABLE;
1087 I915_WRITE(dp_priv->output_reg, DP); 1088 I915_WRITE(dp_priv->output_reg, DP);
1088 POSTING_READ(dp_priv->output_reg); 1089 POSTING_READ(dp_priv->output_reg);
@@ -1095,7 +1096,7 @@ intel_dp_link_down(struct intel_output *intel_output, uint32_t DP)
1095 1096
1096 udelay(17000); 1097 udelay(17000);
1097 1098
1098 if (IS_eDP(intel_output)) 1099 if (IS_eDP(intel_encoder))
1099 DP |= DP_LINK_TRAIN_OFF; 1100 DP |= DP_LINK_TRAIN_OFF;
1100 I915_WRITE(dp_priv->output_reg, DP & ~DP_PORT_EN); 1101 I915_WRITE(dp_priv->output_reg, DP & ~DP_PORT_EN);
1101 POSTING_READ(dp_priv->output_reg); 1102 POSTING_READ(dp_priv->output_reg);
@@ -1104,13 +1105,13 @@ intel_dp_link_down(struct intel_output *intel_output, uint32_t DP)
1104static void 1105static void
1105intel_dp_restore(struct drm_connector *connector) 1106intel_dp_restore(struct drm_connector *connector)
1106{ 1107{
1107 struct intel_output *intel_output = to_intel_output(connector); 1108 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1108 struct intel_dp_priv *dp_priv = intel_output->dev_priv; 1109 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
1109 1110
1110 if (dp_priv->save_DP & DP_PORT_EN) 1111 if (dp_priv->save_DP & DP_PORT_EN)
1111 intel_dp_link_train(intel_output, dp_priv->save_DP, dp_priv->save_link_configuration); 1112 intel_dp_link_train(intel_encoder, dp_priv->save_DP, dp_priv->save_link_configuration);
1112 else 1113 else
1113 intel_dp_link_down(intel_output, dp_priv->save_DP); 1114 intel_dp_link_down(intel_encoder, dp_priv->save_DP);
1114} 1115}
1115 1116
1116/* 1117/*
@@ -1123,32 +1124,32 @@ intel_dp_restore(struct drm_connector *connector)
1123 */ 1124 */
1124 1125
1125static void 1126static void
1126intel_dp_check_link_status(struct intel_output *intel_output) 1127intel_dp_check_link_status(struct intel_encoder *intel_encoder)
1127{ 1128{
1128 struct intel_dp_priv *dp_priv = intel_output->dev_priv; 1129 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
1129 uint8_t link_status[DP_LINK_STATUS_SIZE]; 1130 uint8_t link_status[DP_LINK_STATUS_SIZE];
1130 1131
1131 if (!intel_output->enc.crtc) 1132 if (!intel_encoder->enc.crtc)
1132 return; 1133 return;
1133 1134
1134 if (!intel_dp_get_link_status(intel_output, link_status)) { 1135 if (!intel_dp_get_link_status(intel_encoder, link_status)) {
1135 intel_dp_link_down(intel_output, dp_priv->DP); 1136 intel_dp_link_down(intel_encoder, dp_priv->DP);
1136 return; 1137 return;
1137 } 1138 }
1138 1139
1139 if (!intel_channel_eq_ok(link_status, dp_priv->lane_count)) 1140 if (!intel_channel_eq_ok(link_status, dp_priv->lane_count))
1140 intel_dp_link_train(intel_output, dp_priv->DP, dp_priv->link_configuration); 1141 intel_dp_link_train(intel_encoder, dp_priv->DP, dp_priv->link_configuration);
1141} 1142}
1142 1143
1143static enum drm_connector_status 1144static enum drm_connector_status
1144ironlake_dp_detect(struct drm_connector *connector) 1145ironlake_dp_detect(struct drm_connector *connector)
1145{ 1146{
1146 struct intel_output *intel_output = to_intel_output(connector); 1147 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1147 struct intel_dp_priv *dp_priv = intel_output->dev_priv; 1148 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
1148 enum drm_connector_status status; 1149 enum drm_connector_status status;
1149 1150
1150 status = connector_status_disconnected; 1151 status = connector_status_disconnected;
1151 if (intel_dp_aux_native_read(intel_output, 1152 if (intel_dp_aux_native_read(intel_encoder,
1152 0x000, dp_priv->dpcd, 1153 0x000, dp_priv->dpcd,
1153 sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd)) 1154 sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd))
1154 { 1155 {
@@ -1167,10 +1168,10 @@ ironlake_dp_detect(struct drm_connector *connector)
1167static enum drm_connector_status 1168static enum drm_connector_status
1168intel_dp_detect(struct drm_connector *connector) 1169intel_dp_detect(struct drm_connector *connector)
1169{ 1170{
1170 struct intel_output *intel_output = to_intel_output(connector); 1171 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1171 struct drm_device *dev = intel_output->base.dev; 1172 struct drm_device *dev = intel_encoder->base.dev;
1172 struct drm_i915_private *dev_priv = dev->dev_private; 1173 struct drm_i915_private *dev_priv = dev->dev_private;
1173 struct intel_dp_priv *dp_priv = intel_output->dev_priv; 1174 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
1174 uint32_t temp, bit; 1175 uint32_t temp, bit;
1175 enum drm_connector_status status; 1176 enum drm_connector_status status;
1176 1177
@@ -1209,7 +1210,7 @@ intel_dp_detect(struct drm_connector *connector)
1209 return connector_status_disconnected; 1210 return connector_status_disconnected;
1210 1211
1211 status = connector_status_disconnected; 1212 status = connector_status_disconnected;
1212 if (intel_dp_aux_native_read(intel_output, 1213 if (intel_dp_aux_native_read(intel_encoder,
1213 0x000, dp_priv->dpcd, 1214 0x000, dp_priv->dpcd,
1214 sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd)) 1215 sizeof (dp_priv->dpcd)) == sizeof (dp_priv->dpcd))
1215 { 1216 {
@@ -1221,20 +1222,20 @@ intel_dp_detect(struct drm_connector *connector)
1221 1222
1222static int intel_dp_get_modes(struct drm_connector *connector) 1223static int intel_dp_get_modes(struct drm_connector *connector)
1223{ 1224{
1224 struct intel_output *intel_output = to_intel_output(connector); 1225 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1225 struct drm_device *dev = intel_output->base.dev; 1226 struct drm_device *dev = intel_encoder->base.dev;
1226 struct drm_i915_private *dev_priv = dev->dev_private; 1227 struct drm_i915_private *dev_priv = dev->dev_private;
1227 int ret; 1228 int ret;
1228 1229
1229 /* We should parse the EDID data and find out if it has an audio sink 1230 /* We should parse the EDID data and find out if it has an audio sink
1230 */ 1231 */
1231 1232
1232 ret = intel_ddc_get_modes(intel_output); 1233 ret = intel_ddc_get_modes(intel_encoder);
1233 if (ret) 1234 if (ret)
1234 return ret; 1235 return ret;
1235 1236
1236 /* if eDP has no EDID, try to use fixed panel mode from VBT */ 1237 /* if eDP has no EDID, try to use fixed panel mode from VBT */
1237 if (IS_eDP(intel_output)) { 1238 if (IS_eDP(intel_encoder)) {
1238 if (dev_priv->panel_fixed_mode != NULL) { 1239 if (dev_priv->panel_fixed_mode != NULL) {
1239 struct drm_display_mode *mode; 1240 struct drm_display_mode *mode;
1240 mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode); 1241 mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode);
@@ -1248,13 +1249,13 @@ static int intel_dp_get_modes(struct drm_connector *connector)
1248static void 1249static void
1249intel_dp_destroy (struct drm_connector *connector) 1250intel_dp_destroy (struct drm_connector *connector)
1250{ 1251{
1251 struct intel_output *intel_output = to_intel_output(connector); 1252 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1252 1253
1253 if (intel_output->i2c_bus) 1254 if (intel_encoder->i2c_bus)
1254 intel_i2c_destroy(intel_output->i2c_bus); 1255 intel_i2c_destroy(intel_encoder->i2c_bus);
1255 drm_sysfs_connector_remove(connector); 1256 drm_sysfs_connector_remove(connector);
1256 drm_connector_cleanup(connector); 1257 drm_connector_cleanup(connector);
1257 kfree(intel_output); 1258 kfree(intel_encoder);
1258} 1259}
1259 1260
1260static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = { 1261static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
@@ -1290,12 +1291,12 @@ static const struct drm_encoder_funcs intel_dp_enc_funcs = {
1290}; 1291};
1291 1292
1292void 1293void
1293intel_dp_hot_plug(struct intel_output *intel_output) 1294intel_dp_hot_plug(struct intel_encoder *intel_encoder)
1294{ 1295{
1295 struct intel_dp_priv *dp_priv = intel_output->dev_priv; 1296 struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
1296 1297
1297 if (dp_priv->dpms_mode == DRM_MODE_DPMS_ON) 1298 if (dp_priv->dpms_mode == DRM_MODE_DPMS_ON)
1298 intel_dp_check_link_status(intel_output); 1299 intel_dp_check_link_status(intel_encoder);
1299} 1300}
1300 1301
1301void 1302void
@@ -1303,53 +1304,53 @@ intel_dp_init(struct drm_device *dev, int output_reg)
1303{ 1304{
1304 struct drm_i915_private *dev_priv = dev->dev_private; 1305 struct drm_i915_private *dev_priv = dev->dev_private;
1305 struct drm_connector *connector; 1306 struct drm_connector *connector;
1306 struct intel_output *intel_output; 1307 struct intel_encoder *intel_encoder;
1307 struct intel_dp_priv *dp_priv; 1308 struct intel_dp_priv *dp_priv;
1308 const char *name = NULL; 1309 const char *name = NULL;
1309 1310
1310 intel_output = kcalloc(sizeof(struct intel_output) + 1311 intel_encoder = kcalloc(sizeof(struct intel_encoder) +
1311 sizeof(struct intel_dp_priv), 1, GFP_KERNEL); 1312 sizeof(struct intel_dp_priv), 1, GFP_KERNEL);
1312 if (!intel_output) 1313 if (!intel_encoder)
1313 return; 1314 return;
1314 1315
1315 dp_priv = (struct intel_dp_priv *)(intel_output + 1); 1316 dp_priv = (struct intel_dp_priv *)(intel_encoder + 1);
1316 1317
1317 connector = &intel_output->base; 1318 connector = &intel_encoder->base;
1318 drm_connector_init(dev, connector, &intel_dp_connector_funcs, 1319 drm_connector_init(dev, connector, &intel_dp_connector_funcs,
1319 DRM_MODE_CONNECTOR_DisplayPort); 1320 DRM_MODE_CONNECTOR_DisplayPort);
1320 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); 1321 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
1321 1322
1322 if (output_reg == DP_A) 1323 if (output_reg == DP_A)
1323 intel_output->type = INTEL_OUTPUT_EDP; 1324 intel_encoder->type = INTEL_OUTPUT_EDP;
1324 else 1325 else
1325 intel_output->type = INTEL_OUTPUT_DISPLAYPORT; 1326 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
1326 1327
1327 if (output_reg == DP_B || output_reg == PCH_DP_B) 1328 if (output_reg == DP_B || output_reg == PCH_DP_B)
1328 intel_output->clone_mask = (1 << INTEL_DP_B_CLONE_BIT); 1329 intel_encoder->clone_mask = (1 << INTEL_DP_B_CLONE_BIT);
1329 else if (output_reg == DP_C || output_reg == PCH_DP_C) 1330 else if (output_reg == DP_C || output_reg == PCH_DP_C)
1330 intel_output->clone_mask = (1 << INTEL_DP_C_CLONE_BIT); 1331 intel_encoder->clone_mask = (1 << INTEL_DP_C_CLONE_BIT);
1331 else if (output_reg == DP_D || output_reg == PCH_DP_D) 1332 else if (output_reg == DP_D || output_reg == PCH_DP_D)
1332 intel_output->clone_mask = (1 << INTEL_DP_D_CLONE_BIT); 1333 intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);
1333 1334
1334 if (IS_eDP(intel_output)) 1335 if (IS_eDP(intel_encoder))
1335 intel_output->clone_mask = (1 << INTEL_EDP_CLONE_BIT); 1336 intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT);
1336 1337
1337 intel_output->crtc_mask = (1 << 0) | (1 << 1); 1338 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
1338 connector->interlace_allowed = true; 1339 connector->interlace_allowed = true;
1339 connector->doublescan_allowed = 0; 1340 connector->doublescan_allowed = 0;
1340 1341
1341 dp_priv->intel_output = intel_output; 1342 dp_priv->intel_encoder = intel_encoder;
1342 dp_priv->output_reg = output_reg; 1343 dp_priv->output_reg = output_reg;
1343 dp_priv->has_audio = false; 1344 dp_priv->has_audio = false;
1344 dp_priv->dpms_mode = DRM_MODE_DPMS_ON; 1345 dp_priv->dpms_mode = DRM_MODE_DPMS_ON;
1345 intel_output->dev_priv = dp_priv; 1346 intel_encoder->dev_priv = dp_priv;
1346 1347
1347 drm_encoder_init(dev, &intel_output->enc, &intel_dp_enc_funcs, 1348 drm_encoder_init(dev, &intel_encoder->enc, &intel_dp_enc_funcs,
1348 DRM_MODE_ENCODER_TMDS); 1349 DRM_MODE_ENCODER_TMDS);
1349 drm_encoder_helper_add(&intel_output->enc, &intel_dp_helper_funcs); 1350 drm_encoder_helper_add(&intel_encoder->enc, &intel_dp_helper_funcs);
1350 1351
1351 drm_mode_connector_attach_encoder(&intel_output->base, 1352 drm_mode_connector_attach_encoder(&intel_encoder->base,
1352 &intel_output->enc); 1353 &intel_encoder->enc);
1353 drm_sysfs_connector_add(connector); 1354 drm_sysfs_connector_add(connector);
1354 1355
1355 /* Set up the DDC bus. */ 1356 /* Set up the DDC bus. */
@@ -1377,10 +1378,10 @@ intel_dp_init(struct drm_device *dev, int output_reg)
1377 break; 1378 break;
1378 } 1379 }
1379 1380
1380 intel_dp_i2c_init(intel_output, name); 1381 intel_dp_i2c_init(intel_encoder, name);
1381 1382
1382 intel_output->ddc_bus = &dp_priv->adapter; 1383 intel_encoder->ddc_bus = &dp_priv->adapter;
1383 intel_output->hot_plug = intel_dp_hot_plug; 1384 intel_encoder->hot_plug = intel_dp_hot_plug;
1384 1385
1385 if (output_reg == DP_A) { 1386 if (output_reg == DP_A) {
1386 /* initialize panel mode from VBT if available for eDP */ 1387 /* initialize panel mode from VBT if available for eDP */
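The mode_valid/mode_fixup changes above keep the same bandwidth test: a mode needs roughly pixel_clock * 3 (eDP scales by edp_bpp / 8 instead, and the in-tree comment already calls the unit handling a fiction), while a link offers link_clock * lane_count. The stand-alone sketch below mirrors that search order — fewest lanes and slowest link first — and assumes the usual 162000/270000 values for the two DP link rates; treat it as an illustration, not a copy of the driver.

#include <stdbool.h>
#include <stdio.h>

/* Assumed mapping of the two DP 1.1 link rates, in the driver's kHz-like units. */
static int link_clock(int bw_27) { return bw_27 ? 270000 : 162000; }

/* Mirror of the diff's check: required bandwidth vs. what the link provides. */
static bool config_ok(int pixel_clock, int bw_27, int lane_count)
{
        int required = pixel_clock * 3;              /* non-eDP case shown above */
        int available = link_clock(bw_27) * lane_count;
        return required <= available;
}

int main(void)
{
        int pixel_clock = 148500;                    /* e.g. a 1080p60 pixel clock, kHz */

        /* Same loop shape as intel_dp_mode_fixup(): lanes 1, 2, 4; slow link first. */
        for (int lanes = 1; lanes <= 4; lanes <<= 1)
                for (int bw_27 = 0; bw_27 <= 1; bw_27++)
                        if (config_ok(pixel_clock, bw_27, lanes)) {
                                printf("pick %d lanes at %d\n", lanes, link_clock(bw_27));
                                return 0;
                        }
        printf("mode too big for this link\n");
        return 1;
}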
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 3a467ca57857..e30253755f12 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -95,7 +95,7 @@ struct intel_framebuffer {
95}; 95};
96 96
97 97
98struct intel_output { 98struct intel_encoder {
99 struct drm_connector base; 99 struct drm_connector base;
100 100
101 struct drm_encoder enc; 101 struct drm_encoder enc;
@@ -105,7 +105,7 @@ struct intel_output {
105 bool load_detect_temp; 105 bool load_detect_temp;
106 bool needs_tv_clock; 106 bool needs_tv_clock;
107 void *dev_priv; 107 void *dev_priv;
108 void (*hot_plug)(struct intel_output *); 108 void (*hot_plug)(struct intel_encoder *);
109 int crtc_mask; 109 int crtc_mask;
110 int clone_mask; 110 int clone_mask;
111}; 111};
@@ -152,15 +152,15 @@ struct intel_crtc {
152}; 152};
153 153
154#define to_intel_crtc(x) container_of(x, struct intel_crtc, base) 154#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
155#define to_intel_output(x) container_of(x, struct intel_output, base) 155#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
156#define enc_to_intel_output(x) container_of(x, struct intel_output, enc) 156#define enc_to_intel_encoder(x) container_of(x, struct intel_encoder, enc)
157#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base) 157#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
158 158
159struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg, 159struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg,
160 const char *name); 160 const char *name);
161void intel_i2c_destroy(struct i2c_adapter *adapter); 161void intel_i2c_destroy(struct i2c_adapter *adapter);
162int intel_ddc_get_modes(struct intel_output *intel_output); 162int intel_ddc_get_modes(struct intel_encoder *intel_encoder);
163extern bool intel_ddc_probe(struct intel_output *intel_output); 163extern bool intel_ddc_probe(struct intel_encoder *intel_encoder);
164void intel_i2c_quirk_set(struct drm_device *dev, bool enable); 164void intel_i2c_quirk_set(struct drm_device *dev, bool enable);
165void intel_i2c_reset_gmbus(struct drm_device *dev); 165void intel_i2c_reset_gmbus(struct drm_device *dev);
166 166
@@ -175,7 +175,7 @@ extern void intel_dp_init(struct drm_device *dev, int dp_reg);
175void 175void
176intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, 176intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
177 struct drm_display_mode *adjusted_mode); 177 struct drm_display_mode *adjusted_mode);
178extern void intel_edp_link_config (struct intel_output *, int *, int *); 178extern void intel_edp_link_config (struct intel_encoder *, int *, int *);
179 179
180 180
181extern int intel_panel_fitter_pipe (struct drm_device *dev); 181extern int intel_panel_fitter_pipe (struct drm_device *dev);
@@ -191,10 +191,10 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
191 struct drm_file *file_priv); 191 struct drm_file *file_priv);
192extern void intel_wait_for_vblank(struct drm_device *dev); 192extern void intel_wait_for_vblank(struct drm_device *dev);
193extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe); 193extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe);
194extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output, 194extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
195 struct drm_display_mode *mode, 195 struct drm_display_mode *mode,
196 int *dpms_mode); 196 int *dpms_mode);
197extern void intel_release_load_detect_pipe(struct intel_output *intel_output, 197extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
198 int dpms_mode); 198 int dpms_mode);
199 199
200extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB); 200extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB);
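The renamed struct intel_encoder above still embeds both a drm_connector (base) and a drm_encoder (enc), and the two container_of() macros recover the wrapper from whichever embedded object a DRM callback passes in. The self-contained sketch below demonstrates that round trip with cut-down stand-in structs and a userspace container_of(); the real kernel definitions are richer, so read it as an illustration only.

#include <assert.h>
#include <stddef.h>

/* Userspace stand-in for the kernel's container_of(). */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* Cut-down stand-ins: the real structs carry far more state. */
struct drm_connector { int dummy; };
struct drm_encoder   { int dummy; };

struct intel_encoder {
        struct drm_connector base;   /* connector-facing embedded object */
        struct drm_encoder enc;      /* encoder-facing embedded object */
        int type;
};

#define to_intel_encoder(x)     container_of(x, struct intel_encoder, base)
#define enc_to_intel_encoder(x) container_of(x, struct intel_encoder, enc)

int main(void)
{
        struct intel_encoder e = { .type = 42 };

        /* Either embedded member leads back to the same wrapper object. */
        assert(to_intel_encoder(&e.base) == &e);
        assert(enc_to_intel_encoder(&e.enc) == &e);
        return 0;
}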
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index a4d2606de778..ebf213c96b9c 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -25,6 +25,7 @@
25 * Eric Anholt <eric@anholt.net> 25 * Eric Anholt <eric@anholt.net>
26 */ 26 */
27#include <linux/i2c.h> 27#include <linux/i2c.h>
28#include <linux/slab.h>
28#include "drmP.h" 29#include "drmP.h"
29#include "drm.h" 30#include "drm.h"
30#include "drm_crtc.h" 31#include "drm_crtc.h"
@@ -79,8 +80,8 @@ static struct intel_dvo_device intel_dvo_devices[] = {
79static void intel_dvo_dpms(struct drm_encoder *encoder, int mode) 80static void intel_dvo_dpms(struct drm_encoder *encoder, int mode)
80{ 81{
81 struct drm_i915_private *dev_priv = encoder->dev->dev_private; 82 struct drm_i915_private *dev_priv = encoder->dev->dev_private;
82 struct intel_output *intel_output = enc_to_intel_output(encoder); 83 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
83 struct intel_dvo_device *dvo = intel_output->dev_priv; 84 struct intel_dvo_device *dvo = intel_encoder->dev_priv;
84 u32 dvo_reg = dvo->dvo_reg; 85 u32 dvo_reg = dvo->dvo_reg;
85 u32 temp = I915_READ(dvo_reg); 86 u32 temp = I915_READ(dvo_reg);
86 87
@@ -98,8 +99,8 @@ static void intel_dvo_dpms(struct drm_encoder *encoder, int mode)
98static void intel_dvo_save(struct drm_connector *connector) 99static void intel_dvo_save(struct drm_connector *connector)
99{ 100{
100 struct drm_i915_private *dev_priv = connector->dev->dev_private; 101 struct drm_i915_private *dev_priv = connector->dev->dev_private;
101 struct intel_output *intel_output = to_intel_output(connector); 102 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
102 struct intel_dvo_device *dvo = intel_output->dev_priv; 103 struct intel_dvo_device *dvo = intel_encoder->dev_priv;
103 104
104 /* Each output should probably just save the registers it touches, 105 /* Each output should probably just save the registers it touches,
105 * but for now, use more overkill. 106 * but for now, use more overkill.
@@ -114,8 +115,8 @@ static void intel_dvo_save(struct drm_connector *connector)
114static void intel_dvo_restore(struct drm_connector *connector) 115static void intel_dvo_restore(struct drm_connector *connector)
115{ 116{
116 struct drm_i915_private *dev_priv = connector->dev->dev_private; 117 struct drm_i915_private *dev_priv = connector->dev->dev_private;
117 struct intel_output *intel_output = to_intel_output(connector); 118 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
118 struct intel_dvo_device *dvo = intel_output->dev_priv; 119 struct intel_dvo_device *dvo = intel_encoder->dev_priv;
119 120
120 dvo->dev_ops->restore(dvo); 121 dvo->dev_ops->restore(dvo);
121 122
@@ -127,8 +128,8 @@ static void intel_dvo_restore(struct drm_connector *connector)
127static int intel_dvo_mode_valid(struct drm_connector *connector, 128static int intel_dvo_mode_valid(struct drm_connector *connector,
128 struct drm_display_mode *mode) 129 struct drm_display_mode *mode)
129{ 130{
130 struct intel_output *intel_output = to_intel_output(connector); 131 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
131 struct intel_dvo_device *dvo = intel_output->dev_priv; 132 struct intel_dvo_device *dvo = intel_encoder->dev_priv;
132 133
133 if (mode->flags & DRM_MODE_FLAG_DBLSCAN) 134 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
134 return MODE_NO_DBLESCAN; 135 return MODE_NO_DBLESCAN;
@@ -149,8 +150,8 @@ static bool intel_dvo_mode_fixup(struct drm_encoder *encoder,
149 struct drm_display_mode *mode, 150 struct drm_display_mode *mode,
150 struct drm_display_mode *adjusted_mode) 151 struct drm_display_mode *adjusted_mode)
151{ 152{
152 struct intel_output *intel_output = enc_to_intel_output(encoder); 153 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
153 struct intel_dvo_device *dvo = intel_output->dev_priv; 154 struct intel_dvo_device *dvo = intel_encoder->dev_priv;
154 155
155 /* If we have timings from the BIOS for the panel, put them in 156 /* If we have timings from the BIOS for the panel, put them in
156 * to the adjusted mode. The CRTC will be set up for this mode, 157 * to the adjusted mode. The CRTC will be set up for this mode,
@@ -185,8 +186,8 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
185 struct drm_device *dev = encoder->dev; 186 struct drm_device *dev = encoder->dev;
186 struct drm_i915_private *dev_priv = dev->dev_private; 187 struct drm_i915_private *dev_priv = dev->dev_private;
187 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 188 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
188 struct intel_output *intel_output = enc_to_intel_output(encoder); 189 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
189 struct intel_dvo_device *dvo = intel_output->dev_priv; 190 struct intel_dvo_device *dvo = intel_encoder->dev_priv;
190 int pipe = intel_crtc->pipe; 191 int pipe = intel_crtc->pipe;
191 u32 dvo_val; 192 u32 dvo_val;
192 u32 dvo_reg = dvo->dvo_reg, dvo_srcdim_reg; 193 u32 dvo_reg = dvo->dvo_reg, dvo_srcdim_reg;
@@ -240,23 +241,23 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
240 */ 241 */
241static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector) 242static enum drm_connector_status intel_dvo_detect(struct drm_connector *connector)
242{ 243{
243 struct intel_output *intel_output = to_intel_output(connector); 244 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
244 struct intel_dvo_device *dvo = intel_output->dev_priv; 245 struct intel_dvo_device *dvo = intel_encoder->dev_priv;
245 246
246 return dvo->dev_ops->detect(dvo); 247 return dvo->dev_ops->detect(dvo);
247} 248}
248 249
249static int intel_dvo_get_modes(struct drm_connector *connector) 250static int intel_dvo_get_modes(struct drm_connector *connector)
250{ 251{
251 struct intel_output *intel_output = to_intel_output(connector); 252 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
252 struct intel_dvo_device *dvo = intel_output->dev_priv; 253 struct intel_dvo_device *dvo = intel_encoder->dev_priv;
253 254
254 /* We should probably have an i2c driver get_modes function for those 255 /* We should probably have an i2c driver get_modes function for those
255 * devices which will have a fixed set of modes determined by the chip 256 * devices which will have a fixed set of modes determined by the chip
256 * (TV-out, for example), but for now with just TMDS and LVDS, 257 * (TV-out, for example), but for now with just TMDS and LVDS,
257 * that's not the case. 258 * that's not the case.
258 */ 259 */
259 intel_ddc_get_modes(intel_output); 260 intel_ddc_get_modes(intel_encoder);
260 if (!list_empty(&connector->probed_modes)) 261 if (!list_empty(&connector->probed_modes))
261 return 1; 262 return 1;
262 263
@@ -274,8 +275,8 @@ static int intel_dvo_get_modes(struct drm_connector *connector)
274 275
275static void intel_dvo_destroy (struct drm_connector *connector) 276static void intel_dvo_destroy (struct drm_connector *connector)
276{ 277{
277 struct intel_output *intel_output = to_intel_output(connector); 278 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
278 struct intel_dvo_device *dvo = intel_output->dev_priv; 279 struct intel_dvo_device *dvo = intel_encoder->dev_priv;
279 280
280 if (dvo) { 281 if (dvo) {
281 if (dvo->dev_ops->destroy) 282 if (dvo->dev_ops->destroy)
@@ -285,13 +286,13 @@ static void intel_dvo_destroy (struct drm_connector *connector)
285 /* no need, in i830_dvoices[] now */ 286 /* no need, in i830_dvoices[] now */
286 //kfree(dvo); 287 //kfree(dvo);
287 } 288 }
288 if (intel_output->i2c_bus) 289 if (intel_encoder->i2c_bus)
289 intel_i2c_destroy(intel_output->i2c_bus); 290 intel_i2c_destroy(intel_encoder->i2c_bus);
290 if (intel_output->ddc_bus) 291 if (intel_encoder->ddc_bus)
291 intel_i2c_destroy(intel_output->ddc_bus); 292 intel_i2c_destroy(intel_encoder->ddc_bus);
292 drm_sysfs_connector_remove(connector); 293 drm_sysfs_connector_remove(connector);
293 drm_connector_cleanup(connector); 294 drm_connector_cleanup(connector);
294 kfree(intel_output); 295 kfree(intel_encoder);
295} 296}
296 297
297#ifdef RANDR_GET_CRTC_INTERFACE 298#ifdef RANDR_GET_CRTC_INTERFACE
@@ -299,8 +300,8 @@ static struct drm_crtc *intel_dvo_get_crtc(struct drm_connector *connector)
299{ 300{
300 struct drm_device *dev = connector->dev; 301 struct drm_device *dev = connector->dev;
301 struct drm_i915_private *dev_priv = dev->dev_private; 302 struct drm_i915_private *dev_priv = dev->dev_private;
302 struct intel_output *intel_output = to_intel_output(connector); 303 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
303 struct intel_dvo_device *dvo = intel_output->dev_priv; 304 struct intel_dvo_device *dvo = intel_encoder->dev_priv;
304 int pipe = !!(I915_READ(dvo->dvo_reg) & SDVO_PIPE_B_SELECT); 305 int pipe = !!(I915_READ(dvo->dvo_reg) & SDVO_PIPE_B_SELECT);
305 306
306 return intel_pipe_to_crtc(pScrn, pipe); 307 return intel_pipe_to_crtc(pScrn, pipe);
@@ -351,8 +352,8 @@ intel_dvo_get_current_mode (struct drm_connector *connector)
351{ 352{
352 struct drm_device *dev = connector->dev; 353 struct drm_device *dev = connector->dev;
353 struct drm_i915_private *dev_priv = dev->dev_private; 354 struct drm_i915_private *dev_priv = dev->dev_private;
354 struct intel_output *intel_output = to_intel_output(connector); 355 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
355 struct intel_dvo_device *dvo = intel_output->dev_priv; 356 struct intel_dvo_device *dvo = intel_encoder->dev_priv;
356 uint32_t dvo_reg = dvo->dvo_reg; 357 uint32_t dvo_reg = dvo->dvo_reg;
357 uint32_t dvo_val = I915_READ(dvo_reg); 358 uint32_t dvo_val = I915_READ(dvo_reg);
358 struct drm_display_mode *mode = NULL; 359 struct drm_display_mode *mode = NULL;
@@ -382,24 +383,24 @@ intel_dvo_get_current_mode (struct drm_connector *connector)
382 383
383void intel_dvo_init(struct drm_device *dev) 384void intel_dvo_init(struct drm_device *dev)
384{ 385{
385 struct intel_output *intel_output; 386 struct intel_encoder *intel_encoder;
386 struct intel_dvo_device *dvo; 387 struct intel_dvo_device *dvo;
387 struct i2c_adapter *i2cbus = NULL; 388 struct i2c_adapter *i2cbus = NULL;
388 int ret = 0; 389 int ret = 0;
389 int i; 390 int i;
390 int encoder_type = DRM_MODE_ENCODER_NONE; 391 int encoder_type = DRM_MODE_ENCODER_NONE;
391 intel_output = kzalloc (sizeof(struct intel_output), GFP_KERNEL); 392 intel_encoder = kzalloc (sizeof(struct intel_encoder), GFP_KERNEL);
392 if (!intel_output) 393 if (!intel_encoder)
393 return; 394 return;
394 395
395 /* Set up the DDC bus */ 396 /* Set up the DDC bus */
396 intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D"); 397 intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D");
397 if (!intel_output->ddc_bus) 398 if (!intel_encoder->ddc_bus)
398 goto free_intel; 399 goto free_intel;
399 400
400 /* Now, try to find a controller */ 401 /* Now, try to find a controller */
401 for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) { 402 for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
402 struct drm_connector *connector = &intel_output->base; 403 struct drm_connector *connector = &intel_encoder->base;
403 int gpio; 404 int gpio;
404 405
405 dvo = &intel_dvo_devices[i]; 406 dvo = &intel_dvo_devices[i];
@@ -434,11 +435,11 @@ void intel_dvo_init(struct drm_device *dev)
434 if (!ret) 435 if (!ret)
435 continue; 436 continue;
436 437
437 intel_output->type = INTEL_OUTPUT_DVO; 438 intel_encoder->type = INTEL_OUTPUT_DVO;
438 intel_output->crtc_mask = (1 << 0) | (1 << 1); 439 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
439 switch (dvo->type) { 440 switch (dvo->type) {
440 case INTEL_DVO_CHIP_TMDS: 441 case INTEL_DVO_CHIP_TMDS:
441 intel_output->clone_mask = 442 intel_encoder->clone_mask =
442 (1 << INTEL_DVO_TMDS_CLONE_BIT) | 443 (1 << INTEL_DVO_TMDS_CLONE_BIT) |
443 (1 << INTEL_ANALOG_CLONE_BIT); 444 (1 << INTEL_ANALOG_CLONE_BIT);
444 drm_connector_init(dev, connector, 445 drm_connector_init(dev, connector,
@@ -447,7 +448,7 @@ void intel_dvo_init(struct drm_device *dev)
447 encoder_type = DRM_MODE_ENCODER_TMDS; 448 encoder_type = DRM_MODE_ENCODER_TMDS;
448 break; 449 break;
449 case INTEL_DVO_CHIP_LVDS: 450 case INTEL_DVO_CHIP_LVDS:
450 intel_output->clone_mask = 451 intel_encoder->clone_mask =
451 (1 << INTEL_DVO_LVDS_CLONE_BIT); 452 (1 << INTEL_DVO_LVDS_CLONE_BIT);
452 drm_connector_init(dev, connector, 453 drm_connector_init(dev, connector,
453 &intel_dvo_connector_funcs, 454 &intel_dvo_connector_funcs,
@@ -462,16 +463,16 @@ void intel_dvo_init(struct drm_device *dev)
462 connector->interlace_allowed = false; 463 connector->interlace_allowed = false;
463 connector->doublescan_allowed = false; 464 connector->doublescan_allowed = false;
464 465
465 intel_output->dev_priv = dvo; 466 intel_encoder->dev_priv = dvo;
466 intel_output->i2c_bus = i2cbus; 467 intel_encoder->i2c_bus = i2cbus;
467 468
468 drm_encoder_init(dev, &intel_output->enc, 469 drm_encoder_init(dev, &intel_encoder->enc,
469 &intel_dvo_enc_funcs, encoder_type); 470 &intel_dvo_enc_funcs, encoder_type);
470 drm_encoder_helper_add(&intel_output->enc, 471 drm_encoder_helper_add(&intel_encoder->enc,
471 &intel_dvo_helper_funcs); 472 &intel_dvo_helper_funcs);
472 473
473 drm_mode_connector_attach_encoder(&intel_output->base, 474 drm_mode_connector_attach_encoder(&intel_encoder->base,
474 &intel_output->enc); 475 &intel_encoder->enc);
475 if (dvo->type == INTEL_DVO_CHIP_LVDS) { 476 if (dvo->type == INTEL_DVO_CHIP_LVDS) {
476 /* For our LVDS chipsets, we should hopefully be able 477 /* For our LVDS chipsets, we should hopefully be able
477 * to dig the fixed panel mode out of the BIOS data. 478 * to dig the fixed panel mode out of the BIOS data.
@@ -489,10 +490,10 @@ void intel_dvo_init(struct drm_device *dev)
489 return; 490 return;
490 } 491 }
491 492
492 intel_i2c_destroy(intel_output->ddc_bus); 493 intel_i2c_destroy(intel_encoder->ddc_bus);
493 /* Didn't find a chip, so tear down. */ 494 /* Didn't find a chip, so tear down. */
494 if (i2cbus != NULL) 495 if (i2cbus != NULL)
495 intel_i2c_destroy(i2cbus); 496 intel_i2c_destroy(i2cbus);
496free_intel: 497free_intel:
497 kfree(intel_output); 498 kfree(intel_encoder);
498} 499}
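
The intel_dvo.c hunks above are a mechanical rename (intel_output/to_intel_output become intel_encoder/to_intel_encoder) and keep the existing object layout: the connector is embedded in the encoder structure as its base, and the chip-specific DVO state hangs off dev_priv. A minimal userspace sketch of that pattern follows; the struct names and the container_of-style helper are illustrative assumptions, not the drm/i915 definitions.

/* Minimal userspace sketch (not the kernel code) of the layout the rename
 * keeps intact: an encoder object that embeds the connector as its base and
 * carries chip-specific state through dev_priv.  All names are stand-ins. */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_connector { int id; };              /* stand-in for drm_connector */

struct fake_encoder {                           /* stand-in for intel_encoder */
	struct fake_connector base;
	void *dev_priv;                         /* chip-specific state, e.g. DVO */
};

#define to_fake_encoder(c) \
	((struct fake_encoder *)((char *)(c) - offsetof(struct fake_encoder, base)))

int main(void)
{
	struct fake_encoder *enc = calloc(1, sizeof(*enc));
	struct fake_connector *conn;

	if (!enc)
		return 1;
	conn = &enc->base;

	/* Connector callbacks receive only 'conn'; they recover the encoder. */
	printf("round trip ok: %d\n", to_fake_encoder(conn) == enc);
	free(enc);
	return 0;
}
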
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 8cd791dc5b29..8a0b3bcdc7b1 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -30,7 +30,6 @@
30#include <linux/string.h> 30#include <linux/string.h>
31#include <linux/mm.h> 31#include <linux/mm.h>
32#include <linux/tty.h> 32#include <linux/tty.h>
33#include <linux/slab.h>
34#include <linux/sysrq.h> 33#include <linux/sysrq.h>
35#include <linux/delay.h> 34#include <linux/delay.h>
36#include <linux/fb.h> 35#include <linux/fb.h>
@@ -145,7 +144,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
145 ret = -ENOMEM; 144 ret = -ENOMEM;
146 goto out; 145 goto out;
147 } 146 }
148 obj_priv = fbo->driver_private; 147 obj_priv = to_intel_bo(fbo);
149 148
150 mutex_lock(&dev->struct_mutex); 149 mutex_lock(&dev->struct_mutex);
151 150
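
Besides dropping the now-redundant slab.h include, intel_fb.c switches from reading fbo->driver_private directly to the to_intel_bo() helper. A small standalone sketch of that accessor style is below; the plain pointer-cast layout is an assumption made for illustration, and only the calling style mirrors the diff.

/* Standalone sketch of the accessor change: a typed helper replaces direct
 * reads of the raw driver_private pointer, so the conversion lives in one
 * place.  Names and layout are placeholders. */
#include <stdio.h>

struct gem_object { void *driver_private; };
struct intel_bo   { int pin_count; };

static inline struct intel_bo *to_bo(struct gem_object *obj)
{
	return (struct intel_bo *)obj->driver_private;
}

int main(void)
{
	struct intel_bo bo = { .pin_count = 1 };
	struct gem_object obj = { .driver_private = &bo };

	printf("pin_count=%d\n", to_bo(&obj)->pin_count);   /* typed access */
	return 0;
}
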
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index a30f8bfc1985..48cade0cf7b1 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -27,6 +27,7 @@
27 */ 27 */
28 28
29#include <linux/i2c.h> 29#include <linux/i2c.h>
30#include <linux/slab.h>
30#include <linux/delay.h> 31#include <linux/delay.h>
31#include "drmP.h" 32#include "drmP.h"
32#include "drm.h" 33#include "drm.h"
@@ -50,8 +51,8 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
50 struct drm_i915_private *dev_priv = dev->dev_private; 51 struct drm_i915_private *dev_priv = dev->dev_private;
51 struct drm_crtc *crtc = encoder->crtc; 52 struct drm_crtc *crtc = encoder->crtc;
52 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 53 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
53 struct intel_output *intel_output = enc_to_intel_output(encoder); 54 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
54 struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; 55 struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
55 u32 sdvox; 56 u32 sdvox;
56 57
57 sdvox = SDVO_ENCODING_HDMI | 58 sdvox = SDVO_ENCODING_HDMI |
@@ -73,8 +74,8 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
73{ 74{
74 struct drm_device *dev = encoder->dev; 75 struct drm_device *dev = encoder->dev;
75 struct drm_i915_private *dev_priv = dev->dev_private; 76 struct drm_i915_private *dev_priv = dev->dev_private;
76 struct intel_output *intel_output = enc_to_intel_output(encoder); 77 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
77 struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; 78 struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
78 u32 temp; 79 u32 temp;
79 80
80 temp = I915_READ(hdmi_priv->sdvox_reg); 81 temp = I915_READ(hdmi_priv->sdvox_reg);
@@ -109,8 +110,8 @@ static void intel_hdmi_save(struct drm_connector *connector)
109{ 110{
110 struct drm_device *dev = connector->dev; 111 struct drm_device *dev = connector->dev;
111 struct drm_i915_private *dev_priv = dev->dev_private; 112 struct drm_i915_private *dev_priv = dev->dev_private;
112 struct intel_output *intel_output = to_intel_output(connector); 113 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
113 struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; 114 struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
114 115
115 hdmi_priv->save_SDVOX = I915_READ(hdmi_priv->sdvox_reg); 116 hdmi_priv->save_SDVOX = I915_READ(hdmi_priv->sdvox_reg);
116} 117}
@@ -119,8 +120,8 @@ static void intel_hdmi_restore(struct drm_connector *connector)
119{ 120{
120 struct drm_device *dev = connector->dev; 121 struct drm_device *dev = connector->dev;
121 struct drm_i915_private *dev_priv = dev->dev_private; 122 struct drm_i915_private *dev_priv = dev->dev_private;
122 struct intel_output *intel_output = to_intel_output(connector); 123 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
123 struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; 124 struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
124 125
125 I915_WRITE(hdmi_priv->sdvox_reg, hdmi_priv->save_SDVOX); 126 I915_WRITE(hdmi_priv->sdvox_reg, hdmi_priv->save_SDVOX);
126 POSTING_READ(hdmi_priv->sdvox_reg); 127 POSTING_READ(hdmi_priv->sdvox_reg);
@@ -150,21 +151,21 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
150static enum drm_connector_status 151static enum drm_connector_status
151intel_hdmi_detect(struct drm_connector *connector) 152intel_hdmi_detect(struct drm_connector *connector)
152{ 153{
153 struct intel_output *intel_output = to_intel_output(connector); 154 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
154 struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; 155 struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
155 struct edid *edid = NULL; 156 struct edid *edid = NULL;
156 enum drm_connector_status status = connector_status_disconnected; 157 enum drm_connector_status status = connector_status_disconnected;
157 158
158 hdmi_priv->has_hdmi_sink = false; 159 hdmi_priv->has_hdmi_sink = false;
159 edid = drm_get_edid(&intel_output->base, 160 edid = drm_get_edid(&intel_encoder->base,
160 intel_output->ddc_bus); 161 intel_encoder->ddc_bus);
161 162
162 if (edid) { 163 if (edid) {
163 if (edid->input & DRM_EDID_INPUT_DIGITAL) { 164 if (edid->input & DRM_EDID_INPUT_DIGITAL) {
164 status = connector_status_connected; 165 status = connector_status_connected;
165 hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid); 166 hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
166 } 167 }
167 intel_output->base.display_info.raw_edid = NULL; 168 intel_encoder->base.display_info.raw_edid = NULL;
168 kfree(edid); 169 kfree(edid);
169 } 170 }
170 171
@@ -173,24 +174,24 @@ intel_hdmi_detect(struct drm_connector *connector)
173 174
174static int intel_hdmi_get_modes(struct drm_connector *connector) 175static int intel_hdmi_get_modes(struct drm_connector *connector)
175{ 176{
176 struct intel_output *intel_output = to_intel_output(connector); 177 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
177 178
178 /* We should parse the EDID data and find out if it's an HDMI sink so 179 /* We should parse the EDID data and find out if it's an HDMI sink so
179 * we can send audio to it. 180 * we can send audio to it.
180 */ 181 */
181 182
182 return intel_ddc_get_modes(intel_output); 183 return intel_ddc_get_modes(intel_encoder);
183} 184}
184 185
185static void intel_hdmi_destroy(struct drm_connector *connector) 186static void intel_hdmi_destroy(struct drm_connector *connector)
186{ 187{
187 struct intel_output *intel_output = to_intel_output(connector); 188 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
188 189
189 if (intel_output->i2c_bus) 190 if (intel_encoder->i2c_bus)
190 intel_i2c_destroy(intel_output->i2c_bus); 191 intel_i2c_destroy(intel_encoder->i2c_bus);
191 drm_sysfs_connector_remove(connector); 192 drm_sysfs_connector_remove(connector);
192 drm_connector_cleanup(connector); 193 drm_connector_cleanup(connector);
193 kfree(intel_output); 194 kfree(intel_encoder);
194} 195}
195 196
196static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = { 197static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
@@ -229,63 +230,63 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
229{ 230{
230 struct drm_i915_private *dev_priv = dev->dev_private; 231 struct drm_i915_private *dev_priv = dev->dev_private;
231 struct drm_connector *connector; 232 struct drm_connector *connector;
232 struct intel_output *intel_output; 233 struct intel_encoder *intel_encoder;
233 struct intel_hdmi_priv *hdmi_priv; 234 struct intel_hdmi_priv *hdmi_priv;
234 235
235 intel_output = kcalloc(sizeof(struct intel_output) + 236 intel_encoder = kcalloc(sizeof(struct intel_encoder) +
236 sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL); 237 sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL);
237 if (!intel_output) 238 if (!intel_encoder)
238 return; 239 return;
239 hdmi_priv = (struct intel_hdmi_priv *)(intel_output + 1); 240 hdmi_priv = (struct intel_hdmi_priv *)(intel_encoder + 1);
240 241
241 connector = &intel_output->base; 242 connector = &intel_encoder->base;
242 drm_connector_init(dev, connector, &intel_hdmi_connector_funcs, 243 drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
243 DRM_MODE_CONNECTOR_HDMIA); 244 DRM_MODE_CONNECTOR_HDMIA);
244 drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs); 245 drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
245 246
246 intel_output->type = INTEL_OUTPUT_HDMI; 247 intel_encoder->type = INTEL_OUTPUT_HDMI;
247 248
248 connector->interlace_allowed = 0; 249 connector->interlace_allowed = 0;
249 connector->doublescan_allowed = 0; 250 connector->doublescan_allowed = 0;
250 intel_output->crtc_mask = (1 << 0) | (1 << 1); 251 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
251 252
252 /* Set up the DDC bus. */ 253 /* Set up the DDC bus. */
253 if (sdvox_reg == SDVOB) { 254 if (sdvox_reg == SDVOB) {
254 intel_output->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT); 255 intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
255 intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB"); 256 intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB");
256 dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; 257 dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
257 } else if (sdvox_reg == SDVOC) { 258 } else if (sdvox_reg == SDVOC) {
258 intel_output->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT); 259 intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
259 intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC"); 260 intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC");
260 dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; 261 dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
261 } else if (sdvox_reg == HDMIB) { 262 } else if (sdvox_reg == HDMIB) {
262 intel_output->clone_mask = (1 << INTEL_HDMID_CLONE_BIT); 263 intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
263 intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOE, 264 intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOE,
264 "HDMIB"); 265 "HDMIB");
265 dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; 266 dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
266 } else if (sdvox_reg == HDMIC) { 267 } else if (sdvox_reg == HDMIC) {
267 intel_output->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT); 268 intel_encoder->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT);
268 intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOD, 269 intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOD,
269 "HDMIC"); 270 "HDMIC");
270 dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; 271 dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
271 } else if (sdvox_reg == HDMID) { 272 } else if (sdvox_reg == HDMID) {
272 intel_output->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT); 273 intel_encoder->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT);
273 intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOF, 274 intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOF,
274 "HDMID"); 275 "HDMID");
275 dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS; 276 dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
276 } 277 }
277 if (!intel_output->ddc_bus) 278 if (!intel_encoder->ddc_bus)
278 goto err_connector; 279 goto err_connector;
279 280
280 hdmi_priv->sdvox_reg = sdvox_reg; 281 hdmi_priv->sdvox_reg = sdvox_reg;
281 intel_output->dev_priv = hdmi_priv; 282 intel_encoder->dev_priv = hdmi_priv;
282 283
283 drm_encoder_init(dev, &intel_output->enc, &intel_hdmi_enc_funcs, 284 drm_encoder_init(dev, &intel_encoder->enc, &intel_hdmi_enc_funcs,
284 DRM_MODE_ENCODER_TMDS); 285 DRM_MODE_ENCODER_TMDS);
285 drm_encoder_helper_add(&intel_output->enc, &intel_hdmi_helper_funcs); 286 drm_encoder_helper_add(&intel_encoder->enc, &intel_hdmi_helper_funcs);
286 287
287 drm_mode_connector_attach_encoder(&intel_output->base, 288 drm_mode_connector_attach_encoder(&intel_encoder->base,
288 &intel_output->enc); 289 &intel_encoder->enc);
289 drm_sysfs_connector_add(connector); 290 drm_sysfs_connector_add(connector);
290 291
291 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written 292 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
@@ -301,7 +302,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
301 302
302err_connector: 303err_connector:
303 drm_connector_cleanup(connector); 304 drm_connector_cleanup(connector);
304 kfree(intel_output); 305 kfree(intel_encoder);
305 306
306 return; 307 return;
307} 308}
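
intel_hdmi.c gets the same rename plus the slab.h include; its detect path is unchanged and still keys off the EDID digital-input bit before asking drm_detect_hdmi_monitor() about the sink. The following standalone sketch restates that logic with simplified types; the EDID structure and the constant below are reduced assumptions for illustration only.

/* Standalone sketch of the detect flow this file keeps: report the port
 * connected only when an EDID was fetched and it flags a digital sink, then
 * let the CEA data decide has_hdmi_sink (drm_detect_hdmi_monitor() in the
 * real driver). */
#include <stdbool.h>
#include <stdio.h>

#define EDID_INPUT_DIGITAL 0x80       /* mirrors DRM_EDID_INPUT_DIGITAL */

struct mini_edid { unsigned char input; bool cea_says_hdmi; };

enum status { DISCONNECTED, CONNECTED };

static enum status detect(const struct mini_edid *edid, bool *has_hdmi_sink)
{
	*has_hdmi_sink = false;
	if (!edid)
		return DISCONNECTED;                  /* no EDID, nothing attached */
	if (!(edid->input & EDID_INPUT_DIGITAL))
		return DISCONNECTED;                  /* analog-only EDID */
	*has_hdmi_sink = edid->cea_says_hdmi;         /* CEA extension check */
	return CONNECTED;
}

int main(void)
{
	struct mini_edid e = { .input = EDID_INPUT_DIGITAL, .cea_says_hdmi = true };
	bool hdmi;
	enum status st = detect(&e, &hdmi);

	printf("status=%d hdmi=%d\n", st, hdmi);
	return 0;
}
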
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index fcc753ca5d94..c2649c7df14c 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -26,6 +26,7 @@
26 * Eric Anholt <eric@anholt.net> 26 * Eric Anholt <eric@anholt.net>
27 */ 27 */
28#include <linux/i2c.h> 28#include <linux/i2c.h>
29#include <linux/slab.h>
29#include <linux/i2c-id.h> 30#include <linux/i2c-id.h>
30#include <linux/i2c-algo-bit.h> 31#include <linux/i2c-algo-bit.h>
31#include "drmP.h" 32#include "drmP.h"
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 2b3fa7a3c028..b66806a37d37 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -30,6 +30,7 @@
30#include <acpi/button.h> 30#include <acpi/button.h>
31#include <linux/dmi.h> 31#include <linux/dmi.h>
32#include <linux/i2c.h> 32#include <linux/i2c.h>
33#include <linux/slab.h>
33#include "drmP.h" 34#include "drmP.h"
34#include "drm.h" 35#include "drm.h"
35#include "drm_crtc.h" 36#include "drm_crtc.h"
@@ -238,8 +239,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
238 struct drm_i915_private *dev_priv = dev->dev_private; 239 struct drm_i915_private *dev_priv = dev->dev_private;
239 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 240 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
240 struct drm_encoder *tmp_encoder; 241 struct drm_encoder *tmp_encoder;
241 struct intel_output *intel_output = enc_to_intel_output(encoder); 242 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
242 struct intel_lvds_priv *lvds_priv = intel_output->dev_priv; 243 struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv;
243 u32 pfit_control = 0, pfit_pgm_ratios = 0; 244 u32 pfit_control = 0, pfit_pgm_ratios = 0;
244 int left_border = 0, right_border = 0, top_border = 0; 245 int left_border = 0, right_border = 0, top_border = 0;
245 int bottom_border = 0; 246 int bottom_border = 0;
@@ -586,8 +587,8 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
586{ 587{
587 struct drm_device *dev = encoder->dev; 588 struct drm_device *dev = encoder->dev;
588 struct drm_i915_private *dev_priv = dev->dev_private; 589 struct drm_i915_private *dev_priv = dev->dev_private;
589 struct intel_output *intel_output = enc_to_intel_output(encoder); 590 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
590 struct intel_lvds_priv *lvds_priv = intel_output->dev_priv; 591 struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv;
591 592
592 /* 593 /*
593 * The LVDS pin pair will already have been turned on in the 594 * The LVDS pin pair will already have been turned on in the
@@ -634,14 +635,16 @@ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connect
634static int intel_lvds_get_modes(struct drm_connector *connector) 635static int intel_lvds_get_modes(struct drm_connector *connector)
635{ 636{
636 struct drm_device *dev = connector->dev; 637 struct drm_device *dev = connector->dev;
637 struct intel_output *intel_output = to_intel_output(connector); 638 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
638 struct drm_i915_private *dev_priv = dev->dev_private; 639 struct drm_i915_private *dev_priv = dev->dev_private;
639 int ret = 0; 640 int ret = 0;
640 641
641 ret = intel_ddc_get_modes(intel_output); 642 if (dev_priv->lvds_edid_good) {
643 ret = intel_ddc_get_modes(intel_encoder);
642 644
643 if (ret) 645 if (ret)
644 return ret; 646 return ret;
647 }
645 648
646 /* Didn't get an EDID, so 649 /* Didn't get an EDID, so
647 * Set wide sync ranges so we get all modes 650 * Set wide sync ranges so we get all modes
@@ -714,11 +717,11 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
714static void intel_lvds_destroy(struct drm_connector *connector) 717static void intel_lvds_destroy(struct drm_connector *connector)
715{ 718{
716 struct drm_device *dev = connector->dev; 719 struct drm_device *dev = connector->dev;
717 struct intel_output *intel_output = to_intel_output(connector); 720 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
718 struct drm_i915_private *dev_priv = dev->dev_private; 721 struct drm_i915_private *dev_priv = dev->dev_private;
719 722
720 if (intel_output->ddc_bus) 723 if (intel_encoder->ddc_bus)
721 intel_i2c_destroy(intel_output->ddc_bus); 724 intel_i2c_destroy(intel_encoder->ddc_bus);
722 if (dev_priv->lid_notifier.notifier_call) 725 if (dev_priv->lid_notifier.notifier_call)
723 acpi_lid_notifier_unregister(&dev_priv->lid_notifier); 726 acpi_lid_notifier_unregister(&dev_priv->lid_notifier);
724 drm_sysfs_connector_remove(connector); 727 drm_sysfs_connector_remove(connector);
@@ -731,13 +734,13 @@ static int intel_lvds_set_property(struct drm_connector *connector,
731 uint64_t value) 734 uint64_t value)
732{ 735{
733 struct drm_device *dev = connector->dev; 736 struct drm_device *dev = connector->dev;
734 struct intel_output *intel_output = 737 struct intel_encoder *intel_encoder =
735 to_intel_output(connector); 738 to_intel_encoder(connector);
736 739
737 if (property == dev->mode_config.scaling_mode_property && 740 if (property == dev->mode_config.scaling_mode_property &&
738 connector->encoder) { 741 connector->encoder) {
739 struct drm_crtc *crtc = connector->encoder->crtc; 742 struct drm_crtc *crtc = connector->encoder->crtc;
740 struct intel_lvds_priv *lvds_priv = intel_output->dev_priv; 743 struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv;
741 if (value == DRM_MODE_SCALE_NONE) { 744 if (value == DRM_MODE_SCALE_NONE) {
742 DRM_DEBUG_KMS("no scaling not supported\n"); 745 DRM_DEBUG_KMS("no scaling not supported\n");
743 return 0; 746 return 0;
@@ -857,6 +860,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
857 DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"), 860 DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"),
858 }, 861 },
859 }, 862 },
863 {
864 .callback = intel_no_lvds_dmi_callback,
865 .ident = "Clientron U800",
866 .matches = {
867 DMI_MATCH(DMI_SYS_VENDOR, "Clientron"),
868 DMI_MATCH(DMI_PRODUCT_NAME, "U800"),
869 },
870 },
860 871
861 { } /* terminating entry */ 872 { } /* terminating entry */
862}; 873};
@@ -967,7 +978,7 @@ static int lvds_is_present_in_vbt(struct drm_device *dev)
967void intel_lvds_init(struct drm_device *dev) 978void intel_lvds_init(struct drm_device *dev)
968{ 979{
969 struct drm_i915_private *dev_priv = dev->dev_private; 980 struct drm_i915_private *dev_priv = dev->dev_private;
970 struct intel_output *intel_output; 981 struct intel_encoder *intel_encoder;
971 struct drm_connector *connector; 982 struct drm_connector *connector;
972 struct drm_encoder *encoder; 983 struct drm_encoder *encoder;
973 struct drm_display_mode *scan; /* *modes, *bios_mode; */ 984 struct drm_display_mode *scan; /* *modes, *bios_mode; */
@@ -995,40 +1006,40 @@ void intel_lvds_init(struct drm_device *dev)
995 gpio = PCH_GPIOC; 1006 gpio = PCH_GPIOC;
996 } 1007 }
997 1008
998 intel_output = kzalloc(sizeof(struct intel_output) + 1009 intel_encoder = kzalloc(sizeof(struct intel_encoder) +
999 sizeof(struct intel_lvds_priv), GFP_KERNEL); 1010 sizeof(struct intel_lvds_priv), GFP_KERNEL);
1000 if (!intel_output) { 1011 if (!intel_encoder) {
1001 return; 1012 return;
1002 } 1013 }
1003 1014
1004 connector = &intel_output->base; 1015 connector = &intel_encoder->base;
1005 encoder = &intel_output->enc; 1016 encoder = &intel_encoder->enc;
1006 drm_connector_init(dev, &intel_output->base, &intel_lvds_connector_funcs, 1017 drm_connector_init(dev, &intel_encoder->base, &intel_lvds_connector_funcs,
1007 DRM_MODE_CONNECTOR_LVDS); 1018 DRM_MODE_CONNECTOR_LVDS);
1008 1019
1009 drm_encoder_init(dev, &intel_output->enc, &intel_lvds_enc_funcs, 1020 drm_encoder_init(dev, &intel_encoder->enc, &intel_lvds_enc_funcs,
1010 DRM_MODE_ENCODER_LVDS); 1021 DRM_MODE_ENCODER_LVDS);
1011 1022
1012 drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); 1023 drm_mode_connector_attach_encoder(&intel_encoder->base, &intel_encoder->enc);
1013 intel_output->type = INTEL_OUTPUT_LVDS; 1024 intel_encoder->type = INTEL_OUTPUT_LVDS;
1014 1025
1015 intel_output->clone_mask = (1 << INTEL_LVDS_CLONE_BIT); 1026 intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
1016 intel_output->crtc_mask = (1 << 1); 1027 intel_encoder->crtc_mask = (1 << 1);
1017 drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs); 1028 drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
1018 drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs); 1029 drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
1019 connector->display_info.subpixel_order = SubPixelHorizontalRGB; 1030 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
1020 connector->interlace_allowed = false; 1031 connector->interlace_allowed = false;
1021 connector->doublescan_allowed = false; 1032 connector->doublescan_allowed = false;
1022 1033
1023 lvds_priv = (struct intel_lvds_priv *)(intel_output + 1); 1034 lvds_priv = (struct intel_lvds_priv *)(intel_encoder + 1);
1024 intel_output->dev_priv = lvds_priv; 1035 intel_encoder->dev_priv = lvds_priv;
1025 /* create the scaling mode property */ 1036 /* create the scaling mode property */
1026 drm_mode_create_scaling_mode_property(dev); 1037 drm_mode_create_scaling_mode_property(dev);
1027 /* 1038 /*
1028 * the initial panel fitting mode will be FULL_SCREEN. 1039 * the initial panel fitting mode will be FULL_SCREEN.
1029 */ 1040 */
1030 1041
1031 drm_connector_attach_property(&intel_output->base, 1042 drm_connector_attach_property(&intel_encoder->base,
1032 dev->mode_config.scaling_mode_property, 1043 dev->mode_config.scaling_mode_property,
1033 DRM_MODE_SCALE_FULLSCREEN); 1044 DRM_MODE_SCALE_FULLSCREEN);
1034 lvds_priv->fitting_mode = DRM_MODE_SCALE_FULLSCREEN; 1045 lvds_priv->fitting_mode = DRM_MODE_SCALE_FULLSCREEN;
@@ -1043,8 +1054,8 @@ void intel_lvds_init(struct drm_device *dev)
1043 */ 1054 */
1044 1055
1045 /* Set up the DDC bus. */ 1056 /* Set up the DDC bus. */
1046 intel_output->ddc_bus = intel_i2c_create(dev, gpio, "LVDSDDC_C"); 1057 intel_encoder->ddc_bus = intel_i2c_create(dev, gpio, "LVDSDDC_C");
1047 if (!intel_output->ddc_bus) { 1058 if (!intel_encoder->ddc_bus) {
1048 dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration " 1059 dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
1049 "failed.\n"); 1060 "failed.\n");
1050 goto failed; 1061 goto failed;
@@ -1054,7 +1065,10 @@ void intel_lvds_init(struct drm_device *dev)
1054 * Attempt to get the fixed panel mode from DDC. Assume that the 1065 * Attempt to get the fixed panel mode from DDC. Assume that the
1055 * preferred mode is the right one. 1066 * preferred mode is the right one.
1056 */ 1067 */
1057 intel_ddc_get_modes(intel_output); 1068 dev_priv->lvds_edid_good = true;
1069
1070 if (!intel_ddc_get_modes(intel_encoder))
1071 dev_priv->lvds_edid_good = false;
1058 1072
1059 list_for_each_entry(scan, &connector->probed_modes, head) { 1073 list_for_each_entry(scan, &connector->probed_modes, head) {
1060 mutex_lock(&dev->mode_config.mutex); 1074 mutex_lock(&dev->mode_config.mutex);
@@ -1132,9 +1146,9 @@ out:
1132 1146
1133failed: 1147failed:
1134 DRM_DEBUG_KMS("No LVDS modes found, disabling.\n"); 1148 DRM_DEBUG_KMS("No LVDS modes found, disabling.\n");
1135 if (intel_output->ddc_bus) 1149 if (intel_encoder->ddc_bus)
1136 intel_i2c_destroy(intel_output->ddc_bus); 1150 intel_i2c_destroy(intel_encoder->ddc_bus);
1137 drm_connector_cleanup(connector); 1151 drm_connector_cleanup(connector);
1138 drm_encoder_cleanup(encoder); 1152 drm_encoder_cleanup(encoder);
1139 kfree(intel_output); 1153 kfree(intel_encoder);
1140} 1154}
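
The functional change in intel_lvds.c is the new lvds_edid_good flag: DDC is probed once during init, and intel_lvds_get_modes() only re-reads DDC when that first probe actually returned an EDID, otherwise it falls through to the fixed-mode path. A standalone sketch of that policy, with simplified stand-in structures and helpers, is shown below.

/* Sketch of the probing policy the lvds_edid_good flag introduces: DDC is
 * tried once at init time, and later get_modes() calls only touch the DDC
 * bus again if that first attempt produced an EDID. */
#include <stdbool.h>
#include <stdio.h>

struct lvds_state {
	bool edid_good;        /* result of the init-time DDC probe */
	int  fixed_modes;      /* modes recovered from VBT / current hw state */
};

static int ddc_get_modes(bool bus_has_edid)
{
	return bus_has_edid ? 3 : 0;   /* pretend an EDID yields three modes */
}

static int lvds_get_modes(struct lvds_state *s, bool bus_has_edid)
{
	if (s->edid_good) {            /* only retry DDC if it ever worked */
		int ret = ddc_get_modes(bus_has_edid);
		if (ret)
			return ret;
	}
	return s->fixed_modes;         /* wide-sync-range fallback path */
}

int main(void)
{
	struct lvds_state s = { .edid_good = false, .fixed_modes = 1 };

	printf("modes=%d (DDC skipped)\n", lvds_get_modes(&s, false));
	return 0;
}
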
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index 67e2f4632a24..8e5c83b2d120 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -23,6 +23,7 @@
23 * DEALINGS IN THE SOFTWARE. 23 * DEALINGS IN THE SOFTWARE.
24 */ 24 */
25 25
26#include <linux/slab.h>
26#include <linux/i2c.h> 27#include <linux/i2c.h>
27#include <linux/fb.h> 28#include <linux/fb.h>
28#include "drmP.h" 29#include "drmP.h"
@@ -33,7 +34,7 @@
33 * intel_ddc_probe 34 * intel_ddc_probe
34 * 35 *
35 */ 36 */
36bool intel_ddc_probe(struct intel_output *intel_output) 37bool intel_ddc_probe(struct intel_encoder *intel_encoder)
37{ 38{
38 u8 out_buf[] = { 0x0, 0x0}; 39 u8 out_buf[] = { 0x0, 0x0};
39 u8 buf[2]; 40 u8 buf[2];
@@ -53,9 +54,9 @@ bool intel_ddc_probe(struct intel_output *intel_output)
53 } 54 }
54 }; 55 };
55 56
56 intel_i2c_quirk_set(intel_output->base.dev, true); 57 intel_i2c_quirk_set(intel_encoder->base.dev, true);
57 ret = i2c_transfer(intel_output->ddc_bus, msgs, 2); 58 ret = i2c_transfer(intel_encoder->ddc_bus, msgs, 2);
58 intel_i2c_quirk_set(intel_output->base.dev, false); 59 intel_i2c_quirk_set(intel_encoder->base.dev, false);
59 if (ret == 2) 60 if (ret == 2)
60 return true; 61 return true;
61 62
@@ -68,19 +69,19 @@ bool intel_ddc_probe(struct intel_output *intel_output)
68 * 69 *
69 * Fetch the EDID information from @connector using the DDC bus. 70 * Fetch the EDID information from @connector using the DDC bus.
70 */ 71 */
71int intel_ddc_get_modes(struct intel_output *intel_output) 72int intel_ddc_get_modes(struct intel_encoder *intel_encoder)
72{ 73{
73 struct edid *edid; 74 struct edid *edid;
74 int ret = 0; 75 int ret = 0;
75 76
76 intel_i2c_quirk_set(intel_output->base.dev, true); 77 intel_i2c_quirk_set(intel_encoder->base.dev, true);
77 edid = drm_get_edid(&intel_output->base, intel_output->ddc_bus); 78 edid = drm_get_edid(&intel_encoder->base, intel_encoder->ddc_bus);
78 intel_i2c_quirk_set(intel_output->base.dev, false); 79 intel_i2c_quirk_set(intel_encoder->base.dev, false);
79 if (edid) { 80 if (edid) {
80 drm_mode_connector_update_edid_property(&intel_output->base, 81 drm_mode_connector_update_edid_property(&intel_encoder->base,
81 edid); 82 edid);
82 ret = drm_add_edid_modes(&intel_output->base, edid); 83 ret = drm_add_edid_modes(&intel_encoder->base, edid);
83 intel_output->base.display_info.raw_edid = NULL; 84 intel_encoder->base.display_info.raw_edid = NULL;
84 kfree(edid); 85 kfree(edid);
85 } 86 }
86 87
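
intel_modes.c only renames the parameter type, so intel_ddc_probe() still issues the same two-message transfer: a one-byte write of offset 0 to the DDC address followed by a one-byte read, succeeding when both messages complete. The sketch below mirrors that transaction against a fake adapter; the i2c_msg definition is trimmed to what the example needs.

/* Sketch of the two-message probe intel_ddc_probe() performs.  The adapter
 * and message structure are simplified stand-ins. */
#include <stdbool.h>
#include <stdio.h>

#define I2C_M_RD 0x0001
#define DDC_ADDR 0x50

struct i2c_msg { unsigned short addr, flags, len; unsigned char *buf; };

/* Pretend adapter: succeeds only when a monitor is attached. */
static int fake_i2c_transfer(struct i2c_msg *msgs, int num, bool attached)
{
	(void)msgs;
	return attached ? num : -1;
}

static bool ddc_probe(bool attached)
{
	unsigned char out[2] = { 0x0, 0x0 }, in[2];
	struct i2c_msg msgs[] = {
		{ .addr = DDC_ADDR, .flags = 0,        .len = 1, .buf = out },
		{ .addr = DDC_ADDR, .flags = I2C_M_RD, .len = 1, .buf = in  },
	};

	return fake_i2c_transfer(msgs, 2, attached) == 2;
}

int main(void)
{
	printf("probe: %s\n", ddc_probe(true) ? "sink present" : "nothing there");
	return 0;
}
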
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 60595fc26fdd..6d524a1fc271 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -724,7 +724,7 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay,
724 int ret, tmp_width; 724 int ret, tmp_width;
725 struct overlay_registers *regs; 725 struct overlay_registers *regs;
726 bool scale_changed = false; 726 bool scale_changed = false;
727 struct drm_i915_gem_object *bo_priv = new_bo->driver_private; 727 struct drm_i915_gem_object *bo_priv = to_intel_bo(new_bo);
728 struct drm_device *dev = overlay->dev; 728 struct drm_device *dev = overlay->dev;
729 729
730 BUG_ON(!mutex_is_locked(&dev->struct_mutex)); 730 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -809,7 +809,7 @@ int intel_overlay_do_put_image(struct intel_overlay *overlay,
809 intel_overlay_continue(overlay, scale_changed); 809 intel_overlay_continue(overlay, scale_changed);
810 810
811 overlay->old_vid_bo = overlay->vid_bo; 811 overlay->old_vid_bo = overlay->vid_bo;
812 overlay->vid_bo = new_bo->driver_private; 812 overlay->vid_bo = to_intel_bo(new_bo);
813 813
814 return 0; 814 return 0;
815 815
@@ -1344,7 +1344,7 @@ void intel_setup_overlay(struct drm_device *dev)
1344 reg_bo = drm_gem_object_alloc(dev, PAGE_SIZE); 1344 reg_bo = drm_gem_object_alloc(dev, PAGE_SIZE);
1345 if (!reg_bo) 1345 if (!reg_bo)
1346 goto out_free; 1346 goto out_free;
1347 overlay->reg_bo = reg_bo->driver_private; 1347 overlay->reg_bo = to_intel_bo(reg_bo);
1348 1348
1349 if (OVERLAY_NONPHYSICAL(dev)) { 1349 if (OVERLAY_NONPHYSICAL(dev)) {
1350 ret = i915_gem_object_pin(reg_bo, PAGE_SIZE); 1350 ret = i915_gem_object_pin(reg_bo, PAGE_SIZE);
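
The overlay changes are likewise accessor swaps (driver_private reads become to_intel_bo()), leaving the flip bookkeeping intact: the buffer currently on the hardware is parked in old_vid_bo when a new one is queued, so it is released only after the switch. A simplified sketch of that handoff follows; the structures are stand-ins, not the driver's.

/* Simplified sketch of the buffer handoff the overlay code keeps doing.
 * In the driver the assignments go through to_intel_bo(). */
#include <stdio.h>

struct bo { const char *name; };

struct overlay {
	struct bo *vid_bo;       /* buffer the hardware is scanning out */
	struct bo *old_vid_bo;   /* previous buffer, freed after the flip */
};

static void put_image(struct overlay *ov, struct bo *new_bo)
{
	ov->old_vid_bo = ov->vid_bo;   /* defer release of the in-flight buffer */
	ov->vid_bo = new_bo;
}

int main(void)
{
	struct bo a = { "frame A" }, b = { "frame B" };
	struct overlay ov = { .vid_bo = &a, .old_vid_bo = NULL };

	put_image(&ov, &b);
	printf("showing %s, retiring %s\n", ov.vid_bo->name, ov.old_vid_bo->name);
	return 0;
}
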
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 48daee5c9c63..87d953664cb0 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -26,6 +26,7 @@
26 * Eric Anholt <eric@anholt.net> 26 * Eric Anholt <eric@anholt.net>
27 */ 27 */
28#include <linux/i2c.h> 28#include <linux/i2c.h>
29#include <linux/slab.h>
29#include <linux/delay.h> 30#include <linux/delay.h>
30#include "drmP.h" 31#include "drmP.h"
31#include "drm.h" 32#include "drm.h"
@@ -53,7 +54,7 @@ struct intel_sdvo_priv {
53 u8 slave_addr; 54 u8 slave_addr;
54 55
55 /* Register for the SDVO device: SDVOB or SDVOC */ 56 /* Register for the SDVO device: SDVOB or SDVOC */
56 int output_device; 57 int sdvo_reg;
57 58
58 /* Active outputs controlled by this SDVO output */ 59 /* Active outputs controlled by this SDVO output */
59 uint16_t controlled_output; 60 uint16_t controlled_output;
@@ -123,7 +124,7 @@ struct intel_sdvo_priv {
123 */ 124 */
124 struct intel_sdvo_encode encode; 125 struct intel_sdvo_encode encode;
125 126
126 /* DDC bus used by this SDVO output */ 127 /* DDC bus used by this SDVO encoder */
127 uint8_t ddc_bus; 128 uint8_t ddc_bus;
128 129
129 /* Mac mini hack -- use the same DDC as the analog connector */ 130 /* Mac mini hack -- use the same DDC as the analog connector */
@@ -161,22 +162,22 @@ struct intel_sdvo_priv {
161}; 162};
162 163
163static bool 164static bool
164intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags); 165intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags);
165 166
166/** 167/**
167 * Writes the SDVOB or SDVOC with the given value, but always writes both 168 * Writes the SDVOB or SDVOC with the given value, but always writes both
168 * SDVOB and SDVOC to work around apparent hardware issues (according to 169 * SDVOB and SDVOC to work around apparent hardware issues (according to
169 * comments in the BIOS). 170 * comments in the BIOS).
170 */ 171 */
171static void intel_sdvo_write_sdvox(struct intel_output *intel_output, u32 val) 172static void intel_sdvo_write_sdvox(struct intel_encoder *intel_encoder, u32 val)
172{ 173{
173 struct drm_device *dev = intel_output->base.dev; 174 struct drm_device *dev = intel_encoder->base.dev;
174 struct drm_i915_private *dev_priv = dev->dev_private; 175 struct drm_i915_private *dev_priv = dev->dev_private;
175 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 176 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
176 u32 bval = val, cval = val; 177 u32 bval = val, cval = val;
177 int i; 178 int i;
178 179
179 if (sdvo_priv->output_device == SDVOB) { 180 if (sdvo_priv->sdvo_reg == SDVOB) {
180 cval = I915_READ(SDVOC); 181 cval = I915_READ(SDVOC);
181 } else { 182 } else {
182 bval = I915_READ(SDVOB); 183 bval = I915_READ(SDVOB);
@@ -195,10 +196,10 @@ static void intel_sdvo_write_sdvox(struct intel_output *intel_output, u32 val)
195 } 196 }
196} 197}
197 198
198static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr, 199static bool intel_sdvo_read_byte(struct intel_encoder *intel_encoder, u8 addr,
199 u8 *ch) 200 u8 *ch)
200{ 201{
201 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 202 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
202 u8 out_buf[2]; 203 u8 out_buf[2];
203 u8 buf[2]; 204 u8 buf[2];
204 int ret; 205 int ret;
@@ -221,7 +222,7 @@ static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr,
221 out_buf[0] = addr; 222 out_buf[0] = addr;
222 out_buf[1] = 0; 223 out_buf[1] = 0;
223 224
224 if ((ret = i2c_transfer(intel_output->i2c_bus, msgs, 2)) == 2) 225 if ((ret = i2c_transfer(intel_encoder->i2c_bus, msgs, 2)) == 2)
225 { 226 {
226 *ch = buf[0]; 227 *ch = buf[0];
227 return true; 228 return true;
@@ -231,10 +232,10 @@ static bool intel_sdvo_read_byte(struct intel_output *intel_output, u8 addr,
231 return false; 232 return false;
232} 233}
233 234
234static bool intel_sdvo_write_byte(struct intel_output *intel_output, int addr, 235static bool intel_sdvo_write_byte(struct intel_encoder *intel_encoder, int addr,
235 u8 ch) 236 u8 ch)
236{ 237{
237 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 238 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
238 u8 out_buf[2]; 239 u8 out_buf[2];
239 struct i2c_msg msgs[] = { 240 struct i2c_msg msgs[] = {
240 { 241 {
@@ -248,7 +249,7 @@ static bool intel_sdvo_write_byte(struct intel_output *intel_output, int addr,
248 out_buf[0] = addr; 249 out_buf[0] = addr;
249 out_buf[1] = ch; 250 out_buf[1] = ch;
250 251
251 if (i2c_transfer(intel_output->i2c_bus, msgs, 1) == 1) 252 if (i2c_transfer(intel_encoder->i2c_bus, msgs, 1) == 1)
252 { 253 {
253 return true; 254 return true;
254 } 255 }
@@ -352,13 +353,13 @@ static const struct _sdvo_cmd_name {
352 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA), 353 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA),
353}; 354};
354 355
355#define SDVO_NAME(dev_priv) ((dev_priv)->output_device == SDVOB ? "SDVOB" : "SDVOC") 356#define SDVO_NAME(dev_priv) ((dev_priv)->sdvo_reg == SDVOB ? "SDVOB" : "SDVOC")
356#define SDVO_PRIV(output) ((struct intel_sdvo_priv *) (output)->dev_priv) 357#define SDVO_PRIV(encoder) ((struct intel_sdvo_priv *) (encoder)->dev_priv)
357 358
358static void intel_sdvo_debug_write(struct intel_output *intel_output, u8 cmd, 359static void intel_sdvo_debug_write(struct intel_encoder *intel_encoder, u8 cmd,
359 void *args, int args_len) 360 void *args, int args_len)
360{ 361{
361 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 362 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
362 int i; 363 int i;
363 364
364 DRM_DEBUG_KMS("%s: W: %02X ", 365 DRM_DEBUG_KMS("%s: W: %02X ",
@@ -378,19 +379,19 @@ static void intel_sdvo_debug_write(struct intel_output *intel_output, u8 cmd,
378 DRM_LOG_KMS("\n"); 379 DRM_LOG_KMS("\n");
379} 380}
380 381
381static void intel_sdvo_write_cmd(struct intel_output *intel_output, u8 cmd, 382static void intel_sdvo_write_cmd(struct intel_encoder *intel_encoder, u8 cmd,
382 void *args, int args_len) 383 void *args, int args_len)
383{ 384{
384 int i; 385 int i;
385 386
386 intel_sdvo_debug_write(intel_output, cmd, args, args_len); 387 intel_sdvo_debug_write(intel_encoder, cmd, args, args_len);
387 388
388 for (i = 0; i < args_len; i++) { 389 for (i = 0; i < args_len; i++) {
389 intel_sdvo_write_byte(intel_output, SDVO_I2C_ARG_0 - i, 390 intel_sdvo_write_byte(intel_encoder, SDVO_I2C_ARG_0 - i,
390 ((u8*)args)[i]); 391 ((u8*)args)[i]);
391 } 392 }
392 393
393 intel_sdvo_write_byte(intel_output, SDVO_I2C_OPCODE, cmd); 394 intel_sdvo_write_byte(intel_encoder, SDVO_I2C_OPCODE, cmd);
394} 395}
395 396
396static const char *cmd_status_names[] = { 397static const char *cmd_status_names[] = {
@@ -403,11 +404,11 @@ static const char *cmd_status_names[] = {
403 "Scaling not supported" 404 "Scaling not supported"
404}; 405};
405 406
406static void intel_sdvo_debug_response(struct intel_output *intel_output, 407static void intel_sdvo_debug_response(struct intel_encoder *intel_encoder,
407 void *response, int response_len, 408 void *response, int response_len,
408 u8 status) 409 u8 status)
409{ 410{
410 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 411 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
411 int i; 412 int i;
412 413
413 DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(sdvo_priv)); 414 DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(sdvo_priv));
@@ -422,7 +423,7 @@ static void intel_sdvo_debug_response(struct intel_output *intel_output,
422 DRM_LOG_KMS("\n"); 423 DRM_LOG_KMS("\n");
423} 424}
424 425
425static u8 intel_sdvo_read_response(struct intel_output *intel_output, 426static u8 intel_sdvo_read_response(struct intel_encoder *intel_encoder,
426 void *response, int response_len) 427 void *response, int response_len)
427{ 428{
428 int i; 429 int i;
@@ -432,16 +433,16 @@ static u8 intel_sdvo_read_response(struct intel_output *intel_output,
432 while (retry--) { 433 while (retry--) {
433 /* Read the command response */ 434 /* Read the command response */
434 for (i = 0; i < response_len; i++) { 435 for (i = 0; i < response_len; i++) {
435 intel_sdvo_read_byte(intel_output, 436 intel_sdvo_read_byte(intel_encoder,
436 SDVO_I2C_RETURN_0 + i, 437 SDVO_I2C_RETURN_0 + i,
437 &((u8 *)response)[i]); 438 &((u8 *)response)[i]);
438 } 439 }
439 440
440 /* read the return status */ 441 /* read the return status */
441 intel_sdvo_read_byte(intel_output, SDVO_I2C_CMD_STATUS, 442 intel_sdvo_read_byte(intel_encoder, SDVO_I2C_CMD_STATUS,
442 &status); 443 &status);
443 444
444 intel_sdvo_debug_response(intel_output, response, response_len, 445 intel_sdvo_debug_response(intel_encoder, response, response_len,
445 status); 446 status);
446 if (status != SDVO_CMD_STATUS_PENDING) 447 if (status != SDVO_CMD_STATUS_PENDING)
447 return status; 448 return status;
@@ -469,10 +470,10 @@ static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
469 * another I2C transaction after issuing the DDC bus switch, it will be 470 * another I2C transaction after issuing the DDC bus switch, it will be
470 * switched to the internal SDVO register. 471 * switched to the internal SDVO register.
471 */ 472 */
472static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output, 473static void intel_sdvo_set_control_bus_switch(struct intel_encoder *intel_encoder,
473 u8 target) 474 u8 target)
474{ 475{
475 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 476 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
476 u8 out_buf[2], cmd_buf[2], ret_value[2], ret; 477 u8 out_buf[2], cmd_buf[2], ret_value[2], ret;
477 struct i2c_msg msgs[] = { 478 struct i2c_msg msgs[] = {
478 { 479 {
@@ -496,10 +497,10 @@ static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output,
496 }, 497 },
497 }; 498 };
498 499
499 intel_sdvo_debug_write(intel_output, SDVO_CMD_SET_CONTROL_BUS_SWITCH, 500 intel_sdvo_debug_write(intel_encoder, SDVO_CMD_SET_CONTROL_BUS_SWITCH,
500 &target, 1); 501 &target, 1);
501 /* write the DDC switch command argument */ 502 /* write the DDC switch command argument */
502 intel_sdvo_write_byte(intel_output, SDVO_I2C_ARG_0, target); 503 intel_sdvo_write_byte(intel_encoder, SDVO_I2C_ARG_0, target);
503 504
504 out_buf[0] = SDVO_I2C_OPCODE; 505 out_buf[0] = SDVO_I2C_OPCODE;
505 out_buf[1] = SDVO_CMD_SET_CONTROL_BUS_SWITCH; 506 out_buf[1] = SDVO_CMD_SET_CONTROL_BUS_SWITCH;
@@ -508,7 +509,7 @@ static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output,
508 ret_value[0] = 0; 509 ret_value[0] = 0;
509 ret_value[1] = 0; 510 ret_value[1] = 0;
510 511
511 ret = i2c_transfer(intel_output->i2c_bus, msgs, 3); 512 ret = i2c_transfer(intel_encoder->i2c_bus, msgs, 3);
512 if (ret != 3) { 513 if (ret != 3) {
513 /* failure in I2C transfer */ 514 /* failure in I2C transfer */
514 DRM_DEBUG_KMS("I2c transfer returned %d\n", ret); 515 DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
@@ -522,7 +523,7 @@ static void intel_sdvo_set_control_bus_switch(struct intel_output *intel_output,
522 return; 523 return;
523} 524}
524 525
525static bool intel_sdvo_set_target_input(struct intel_output *intel_output, bool target_0, bool target_1) 526static bool intel_sdvo_set_target_input(struct intel_encoder *intel_encoder, bool target_0, bool target_1)
526{ 527{
527 struct intel_sdvo_set_target_input_args targets = {0}; 528 struct intel_sdvo_set_target_input_args targets = {0};
528 u8 status; 529 u8 status;
@@ -533,10 +534,10 @@ static bool intel_sdvo_set_target_input(struct intel_output *intel_output, bool
533 if (target_1) 534 if (target_1)
534 targets.target_1 = 1; 535 targets.target_1 = 1;
535 536
536 intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_TARGET_INPUT, &targets, 537 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TARGET_INPUT, &targets,
537 sizeof(targets)); 538 sizeof(targets));
538 539
539 status = intel_sdvo_read_response(intel_output, NULL, 0); 540 status = intel_sdvo_read_response(intel_encoder, NULL, 0);
540 541
541 return (status == SDVO_CMD_STATUS_SUCCESS); 542 return (status == SDVO_CMD_STATUS_SUCCESS);
542} 543}
@@ -547,13 +548,13 @@ static bool intel_sdvo_set_target_input(struct intel_output *intel_output, bool
547 * This function is making an assumption about the layout of the response, 548 * This function is making an assumption about the layout of the response,
548 * which should be checked against the docs. 549 * which should be checked against the docs.
549 */ 550 */
550static bool intel_sdvo_get_trained_inputs(struct intel_output *intel_output, bool *input_1, bool *input_2) 551static bool intel_sdvo_get_trained_inputs(struct intel_encoder *intel_encoder, bool *input_1, bool *input_2)
551{ 552{
552 struct intel_sdvo_get_trained_inputs_response response; 553 struct intel_sdvo_get_trained_inputs_response response;
553 u8 status; 554 u8 status;
554 555
555 intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_TRAINED_INPUTS, NULL, 0); 556 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_TRAINED_INPUTS, NULL, 0);
556 status = intel_sdvo_read_response(intel_output, &response, sizeof(response)); 557 status = intel_sdvo_read_response(intel_encoder, &response, sizeof(response));
557 if (status != SDVO_CMD_STATUS_SUCCESS) 558 if (status != SDVO_CMD_STATUS_SUCCESS)
558 return false; 559 return false;
559 560
@@ -562,29 +563,29 @@ static bool intel_sdvo_get_trained_inputs(struct intel_output *intel_output, boo
562 return true; 563 return true;
563} 564}
564 565
565static bool intel_sdvo_get_active_outputs(struct intel_output *intel_output, 566static bool intel_sdvo_get_active_outputs(struct intel_encoder *intel_encoder,
566 u16 *outputs) 567 u16 *outputs)
567{ 568{
568 u8 status; 569 u8 status;
569 570
570 intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ACTIVE_OUTPUTS, NULL, 0); 571 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_OUTPUTS, NULL, 0);
571 status = intel_sdvo_read_response(intel_output, outputs, sizeof(*outputs)); 572 status = intel_sdvo_read_response(intel_encoder, outputs, sizeof(*outputs));
572 573
573 return (status == SDVO_CMD_STATUS_SUCCESS); 574 return (status == SDVO_CMD_STATUS_SUCCESS);
574} 575}
575 576
576static bool intel_sdvo_set_active_outputs(struct intel_output *intel_output, 577static bool intel_sdvo_set_active_outputs(struct intel_encoder *intel_encoder,
577 u16 outputs) 578 u16 outputs)
578{ 579{
579 u8 status; 580 u8 status;
580 581
581 intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ACTIVE_OUTPUTS, &outputs, 582 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ACTIVE_OUTPUTS, &outputs,
582 sizeof(outputs)); 583 sizeof(outputs));
583 status = intel_sdvo_read_response(intel_output, NULL, 0); 584 status = intel_sdvo_read_response(intel_encoder, NULL, 0);
584 return (status == SDVO_CMD_STATUS_SUCCESS); 585 return (status == SDVO_CMD_STATUS_SUCCESS);
585} 586}
586 587
587static bool intel_sdvo_set_encoder_power_state(struct intel_output *intel_output, 588static bool intel_sdvo_set_encoder_power_state(struct intel_encoder *intel_encoder,
588 int mode) 589 int mode)
589{ 590{
590 u8 status, state = SDVO_ENCODER_STATE_ON; 591 u8 status, state = SDVO_ENCODER_STATE_ON;
@@ -604,24 +605,24 @@ static bool intel_sdvo_set_encoder_power_state(struct intel_output *intel_output
604 break; 605 break;
605 } 606 }
606 607
607 intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ENCODER_POWER_STATE, &state, 608 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ENCODER_POWER_STATE, &state,
608 sizeof(state)); 609 sizeof(state));
609 status = intel_sdvo_read_response(intel_output, NULL, 0); 610 status = intel_sdvo_read_response(intel_encoder, NULL, 0);
610 611
611 return (status == SDVO_CMD_STATUS_SUCCESS); 612 return (status == SDVO_CMD_STATUS_SUCCESS);
612} 613}
613 614
-static bool intel_sdvo_get_input_pixel_clock_range(struct intel_output *intel_output,
+static bool intel_sdvo_get_input_pixel_clock_range(struct intel_encoder *intel_encoder,
 						   int *clock_min,
 						   int *clock_max)
 {
 	struct intel_sdvo_pixel_clock_range clocks;
 	u8 status;
 
-	intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE,
+	intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE,
 			     NULL, 0);
 
-	status = intel_sdvo_read_response(intel_output, &clocks, sizeof(clocks));
+	status = intel_sdvo_read_response(intel_encoder, &clocks, sizeof(clocks));
 
 	if (status != SDVO_CMD_STATUS_SUCCESS)
 		return false;
@@ -633,31 +634,31 @@ static bool intel_sdvo_get_input_pixel_clock_range(struct intel_output *intel_ou
 	return true;
 }
 
-static bool intel_sdvo_set_target_output(struct intel_output *intel_output,
+static bool intel_sdvo_set_target_output(struct intel_encoder *intel_encoder,
 					 u16 outputs)
 {
 	u8 status;
 
-	intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_TARGET_OUTPUT, &outputs,
+	intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TARGET_OUTPUT, &outputs,
 			     sizeof(outputs));
 
-	status = intel_sdvo_read_response(intel_output, NULL, 0);
+	status = intel_sdvo_read_response(intel_encoder, NULL, 0);
 	return (status == SDVO_CMD_STATUS_SUCCESS);
 }
 
-static bool intel_sdvo_get_timing(struct intel_output *intel_output, u8 cmd,
+static bool intel_sdvo_get_timing(struct intel_encoder *intel_encoder, u8 cmd,
 				  struct intel_sdvo_dtd *dtd)
 {
 	u8 status;
 
-	intel_sdvo_write_cmd(intel_output, cmd, NULL, 0);
-	status = intel_sdvo_read_response(intel_output, &dtd->part1,
+	intel_sdvo_write_cmd(intel_encoder, cmd, NULL, 0);
+	status = intel_sdvo_read_response(intel_encoder, &dtd->part1,
 					  sizeof(dtd->part1));
 	if (status != SDVO_CMD_STATUS_SUCCESS)
 		return false;
 
-	intel_sdvo_write_cmd(intel_output, cmd + 1, NULL, 0);
-	status = intel_sdvo_read_response(intel_output, &dtd->part2,
+	intel_sdvo_write_cmd(intel_encoder, cmd + 1, NULL, 0);
+	status = intel_sdvo_read_response(intel_encoder, &dtd->part2,
 					  sizeof(dtd->part2));
 	if (status != SDVO_CMD_STATUS_SUCCESS)
 		return false;
@@ -665,60 +666,60 @@ static bool intel_sdvo_get_timing(struct intel_output *intel_output, u8 cmd,
 	return true;
 }
 
-static bool intel_sdvo_get_input_timing(struct intel_output *intel_output,
+static bool intel_sdvo_get_input_timing(struct intel_encoder *intel_encoder,
 					struct intel_sdvo_dtd *dtd)
 {
-	return intel_sdvo_get_timing(intel_output,
+	return intel_sdvo_get_timing(intel_encoder,
 				     SDVO_CMD_GET_INPUT_TIMINGS_PART1, dtd);
 }
 
-static bool intel_sdvo_get_output_timing(struct intel_output *intel_output,
+static bool intel_sdvo_get_output_timing(struct intel_encoder *intel_encoder,
 					 struct intel_sdvo_dtd *dtd)
 {
-	return intel_sdvo_get_timing(intel_output,
+	return intel_sdvo_get_timing(intel_encoder,
 				     SDVO_CMD_GET_OUTPUT_TIMINGS_PART1, dtd);
 }
 
-static bool intel_sdvo_set_timing(struct intel_output *intel_output, u8 cmd,
+static bool intel_sdvo_set_timing(struct intel_encoder *intel_encoder, u8 cmd,
 				  struct intel_sdvo_dtd *dtd)
 {
 	u8 status;
 
-	intel_sdvo_write_cmd(intel_output, cmd, &dtd->part1, sizeof(dtd->part1));
-	status = intel_sdvo_read_response(intel_output, NULL, 0);
+	intel_sdvo_write_cmd(intel_encoder, cmd, &dtd->part1, sizeof(dtd->part1));
+	status = intel_sdvo_read_response(intel_encoder, NULL, 0);
 	if (status != SDVO_CMD_STATUS_SUCCESS)
 		return false;
 
-	intel_sdvo_write_cmd(intel_output, cmd + 1, &dtd->part2, sizeof(dtd->part2));
-	status = intel_sdvo_read_response(intel_output, NULL, 0);
+	intel_sdvo_write_cmd(intel_encoder, cmd + 1, &dtd->part2, sizeof(dtd->part2));
+	status = intel_sdvo_read_response(intel_encoder, NULL, 0);
 	if (status != SDVO_CMD_STATUS_SUCCESS)
 		return false;
 
 	return true;
 }
 
-static bool intel_sdvo_set_input_timing(struct intel_output *intel_output,
+static bool intel_sdvo_set_input_timing(struct intel_encoder *intel_encoder,
 					struct intel_sdvo_dtd *dtd)
 {
-	return intel_sdvo_set_timing(intel_output,
+	return intel_sdvo_set_timing(intel_encoder,
 				     SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd);
 }
 
-static bool intel_sdvo_set_output_timing(struct intel_output *intel_output,
+static bool intel_sdvo_set_output_timing(struct intel_encoder *intel_encoder,
 					 struct intel_sdvo_dtd *dtd)
 {
-	return intel_sdvo_set_timing(intel_output,
+	return intel_sdvo_set_timing(intel_encoder,
 				     SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd);
 }
 
 static bool
-intel_sdvo_create_preferred_input_timing(struct intel_output *output,
+intel_sdvo_create_preferred_input_timing(struct intel_encoder *intel_encoder,
 					 uint16_t clock,
 					 uint16_t width,
 					 uint16_t height)
 {
 	struct intel_sdvo_preferred_input_timing_args args;
-	struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
+	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
 	uint8_t status;
 
 	memset(&args, 0, sizeof(args));
@@ -732,32 +733,33 @@ intel_sdvo_create_preferred_input_timing(struct intel_output *output,
 	    sdvo_priv->sdvo_lvds_fixed_mode->vdisplay != height))
 		args.scaled = 1;
 
-	intel_sdvo_write_cmd(output, SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING,
+	intel_sdvo_write_cmd(intel_encoder,
+			     SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING,
 			     &args, sizeof(args));
-	status = intel_sdvo_read_response(output, NULL, 0);
+	status = intel_sdvo_read_response(intel_encoder, NULL, 0);
 	if (status != SDVO_CMD_STATUS_SUCCESS)
 		return false;
 
 	return true;
 }
 
-static bool intel_sdvo_get_preferred_input_timing(struct intel_output *output,
+static bool intel_sdvo_get_preferred_input_timing(struct intel_encoder *intel_encoder,
 						  struct intel_sdvo_dtd *dtd)
 {
 	bool status;
 
-	intel_sdvo_write_cmd(output, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
+	intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
 			     NULL, 0);
 
-	status = intel_sdvo_read_response(output, &dtd->part1,
+	status = intel_sdvo_read_response(intel_encoder, &dtd->part1,
 					  sizeof(dtd->part1));
 	if (status != SDVO_CMD_STATUS_SUCCESS)
 		return false;
 
-	intel_sdvo_write_cmd(output, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2,
+	intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2,
 			     NULL, 0);
 
-	status = intel_sdvo_read_response(output, &dtd->part2,
+	status = intel_sdvo_read_response(intel_encoder, &dtd->part2,
 					  sizeof(dtd->part2));
 	if (status != SDVO_CMD_STATUS_SUCCESS)
 		return false;
@@ -765,12 +767,12 @@ static bool intel_sdvo_get_preferred_input_timing(struct intel_output *output,
 	return false;
 }
 
-static int intel_sdvo_get_clock_rate_mult(struct intel_output *intel_output)
+static int intel_sdvo_get_clock_rate_mult(struct intel_encoder *intel_encoder)
 {
 	u8 response, status;
 
-	intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_CLOCK_RATE_MULT, NULL, 0);
-	status = intel_sdvo_read_response(intel_output, &response, 1);
+	intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_CLOCK_RATE_MULT, NULL, 0);
+	status = intel_sdvo_read_response(intel_encoder, &response, 1);
 
 	if (status != SDVO_CMD_STATUS_SUCCESS) {
 		DRM_DEBUG_KMS("Couldn't get SDVO clock rate multiplier\n");
@@ -782,12 +784,12 @@ static int intel_sdvo_get_clock_rate_mult(struct intel_output *intel_output)
 	return response;
 }
 
-static bool intel_sdvo_set_clock_rate_mult(struct intel_output *intel_output, u8 val)
+static bool intel_sdvo_set_clock_rate_mult(struct intel_encoder *intel_encoder, u8 val)
 {
 	u8 status;
 
-	intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1);
-	status = intel_sdvo_read_response(intel_output, NULL, 0);
+	intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1);
+	status = intel_sdvo_read_response(intel_encoder, NULL, 0);
 	if (status != SDVO_CMD_STATUS_SUCCESS)
 		return false;
 
@@ -876,13 +878,13 @@ static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode,
 		mode->flags |= DRM_MODE_FLAG_PVSYNC;
 }
 
-static bool intel_sdvo_get_supp_encode(struct intel_output *output,
+static bool intel_sdvo_get_supp_encode(struct intel_encoder *intel_encoder,
 				       struct intel_sdvo_encode *encode)
 {
 	uint8_t status;
 
-	intel_sdvo_write_cmd(output, SDVO_CMD_GET_SUPP_ENCODE, NULL, 0);
-	status = intel_sdvo_read_response(output, encode, sizeof(*encode));
+	intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SUPP_ENCODE, NULL, 0);
+	status = intel_sdvo_read_response(intel_encoder, encode, sizeof(*encode));
 	if (status != SDVO_CMD_STATUS_SUCCESS) { /* non-support means DVI */
 		memset(encode, 0, sizeof(*encode));
 		return false;
@@ -891,29 +893,30 @@ static bool intel_sdvo_get_supp_encode(struct intel_output *output,
 	return true;
 }
 
-static bool intel_sdvo_set_encode(struct intel_output *output, uint8_t mode)
+static bool intel_sdvo_set_encode(struct intel_encoder *intel_encoder,
+				  uint8_t mode)
 {
 	uint8_t status;
 
-	intel_sdvo_write_cmd(output, SDVO_CMD_SET_ENCODE, &mode, 1);
-	status = intel_sdvo_read_response(output, NULL, 0);
+	intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ENCODE, &mode, 1);
+	status = intel_sdvo_read_response(intel_encoder, NULL, 0);
 
 	return (status == SDVO_CMD_STATUS_SUCCESS);
 }
 
-static bool intel_sdvo_set_colorimetry(struct intel_output *output,
+static bool intel_sdvo_set_colorimetry(struct intel_encoder *intel_encoder,
 				       uint8_t mode)
 {
 	uint8_t status;
 
-	intel_sdvo_write_cmd(output, SDVO_CMD_SET_COLORIMETRY, &mode, 1);
-	status = intel_sdvo_read_response(output, NULL, 0);
+	intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_COLORIMETRY, &mode, 1);
+	status = intel_sdvo_read_response(intel_encoder, NULL, 0);
 
 	return (status == SDVO_CMD_STATUS_SUCCESS);
 }
 
 #if 0
-static void intel_sdvo_dump_hdmi_buf(struct intel_output *output)
+static void intel_sdvo_dump_hdmi_buf(struct intel_encoder *intel_encoder)
 {
 	int i, j;
 	uint8_t set_buf_index[2];
@@ -922,43 +925,45 @@ static void intel_sdvo_dump_hdmi_buf(struct intel_output *output)
 	uint8_t buf[48];
 	uint8_t *pos;
 
-	intel_sdvo_write_cmd(output, SDVO_CMD_GET_HBUF_AV_SPLIT, NULL, 0);
-	intel_sdvo_read_response(output, &av_split, 1);
+	intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_AV_SPLIT, NULL, 0);
+	intel_sdvo_read_response(encoder, &av_split, 1);
 
 	for (i = 0; i <= av_split; i++) {
 		set_buf_index[0] = i; set_buf_index[1] = 0;
-		intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_INDEX,
+		intel_sdvo_write_cmd(encoder, SDVO_CMD_SET_HBUF_INDEX,
 				     set_buf_index, 2);
-		intel_sdvo_write_cmd(output, SDVO_CMD_GET_HBUF_INFO, NULL, 0);
-		intel_sdvo_read_response(output, &buf_size, 1);
+		intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_INFO, NULL, 0);
+		intel_sdvo_read_response(encoder, &buf_size, 1);
 
 		pos = buf;
 		for (j = 0; j <= buf_size; j += 8) {
-			intel_sdvo_write_cmd(output, SDVO_CMD_GET_HBUF_DATA,
+			intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_DATA,
 					     NULL, 0);
-			intel_sdvo_read_response(output, pos, 8);
+			intel_sdvo_read_response(encoder, pos, 8);
 			pos += 8;
 		}
 	}
 }
 #endif
 
-static void intel_sdvo_set_hdmi_buf(struct intel_output *output, int index,
-				    uint8_t *data, int8_t size, uint8_t tx_rate)
+static void intel_sdvo_set_hdmi_buf(struct intel_encoder *intel_encoder,
+				    int index,
+				    uint8_t *data, int8_t size, uint8_t tx_rate)
 {
 	uint8_t set_buf_index[2];
 
 	set_buf_index[0] = index;
 	set_buf_index[1] = 0;
 
-	intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_INDEX, set_buf_index, 2);
+	intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_HBUF_INDEX,
+			     set_buf_index, 2);
 
 	for (; size > 0; size -= 8) {
-		intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_DATA, data, 8);
+		intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_HBUF_DATA, data, 8);
 		data += 8;
 	}
 
-	intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_TXRATE, &tx_rate, 1);
+	intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_HBUF_TXRATE, &tx_rate, 1);
 }
 
 static uint8_t intel_sdvo_calc_hbuf_csum(uint8_t *data, uint8_t size)
@@ -1033,7 +1038,7 @@ struct dip_infoframe {
 	} __attribute__ ((packed)) u;
 } __attribute__((packed));
 
-static void intel_sdvo_set_avi_infoframe(struct intel_output *output,
+static void intel_sdvo_set_avi_infoframe(struct intel_encoder *intel_encoder,
 					 struct drm_display_mode * mode)
 {
 	struct dip_infoframe avi_if = {
@@ -1044,15 +1049,16 @@ static void intel_sdvo_set_avi_infoframe(struct intel_output *output,
 
 	avi_if.checksum = intel_sdvo_calc_hbuf_csum((uint8_t *)&avi_if,
 						    4 + avi_if.len);
-	intel_sdvo_set_hdmi_buf(output, 1, (uint8_t *)&avi_if, 4 + avi_if.len,
+	intel_sdvo_set_hdmi_buf(intel_encoder, 1, (uint8_t *)&avi_if,
+				4 + avi_if.len,
 				SDVO_HBUF_TX_VSYNC);
 }
 
-static void intel_sdvo_set_tv_format(struct intel_output *output)
+static void intel_sdvo_set_tv_format(struct intel_encoder *intel_encoder)
 {
 
 	struct intel_sdvo_tv_format format;
-	struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
+	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
 	uint32_t format_map, i;
 	uint8_t status;
 
@@ -1065,10 +1071,10 @@ static void intel_sdvo_set_tv_format(struct intel_output *output)
 	memcpy(&format, &format_map, sizeof(format_map) > sizeof(format) ?
 	       sizeof(format) : sizeof(format_map));
 
-	intel_sdvo_write_cmd(output, SDVO_CMD_SET_TV_FORMAT, &format_map,
+	intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_TV_FORMAT, &format_map,
 			     sizeof(format));
 
-	status = intel_sdvo_read_response(output, NULL, 0);
+	status = intel_sdvo_read_response(intel_encoder, NULL, 0);
 	if (status != SDVO_CMD_STATUS_SUCCESS)
 		DRM_DEBUG_KMS("%s: Failed to set TV format\n",
 			      SDVO_NAME(sdvo_priv));
@@ -1078,8 +1084,8 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
 				  struct drm_display_mode *mode,
 				  struct drm_display_mode *adjusted_mode)
 {
-	struct intel_output *output = enc_to_intel_output(encoder);
-	struct intel_sdvo_priv *dev_priv = output->dev_priv;
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+	struct intel_sdvo_priv *dev_priv = intel_encoder->dev_priv;
 
 	if (dev_priv->is_tv) {
 		struct intel_sdvo_dtd output_dtd;
@@ -1094,22 +1100,22 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
 
 		/* Set output timings */
 		intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
-		intel_sdvo_set_target_output(output,
+		intel_sdvo_set_target_output(intel_encoder,
 					     dev_priv->controlled_output);
-		intel_sdvo_set_output_timing(output, &output_dtd);
+		intel_sdvo_set_output_timing(intel_encoder, &output_dtd);
 
 		/* Set the input timing to the screen. Assume always input 0. */
-		intel_sdvo_set_target_input(output, true, false);
+		intel_sdvo_set_target_input(intel_encoder, true, false);
 
 
-		success = intel_sdvo_create_preferred_input_timing(output,
+		success = intel_sdvo_create_preferred_input_timing(intel_encoder,
 								   mode->clock / 10,
 								   mode->hdisplay,
 								   mode->vdisplay);
 		if (success) {
 			struct intel_sdvo_dtd input_dtd;
 
-			intel_sdvo_get_preferred_input_timing(output,
+			intel_sdvo_get_preferred_input_timing(intel_encoder,
 							      &input_dtd);
 			intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd);
 			dev_priv->sdvo_flags = input_dtd.part2.sdvo_flags;
@@ -1132,16 +1138,16 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
 		intel_sdvo_get_dtd_from_mode(&output_dtd,
 					     dev_priv->sdvo_lvds_fixed_mode);
 
-		intel_sdvo_set_target_output(output,
+		intel_sdvo_set_target_output(intel_encoder,
 					     dev_priv->controlled_output);
-		intel_sdvo_set_output_timing(output, &output_dtd);
+		intel_sdvo_set_output_timing(intel_encoder, &output_dtd);
 
 		/* Set the input timing to the screen. Assume always input 0. */
-		intel_sdvo_set_target_input(output, true, false);
+		intel_sdvo_set_target_input(intel_encoder, true, false);
 
 
 		success = intel_sdvo_create_preferred_input_timing(
-				output,
+				intel_encoder,
 				mode->clock / 10,
 				mode->hdisplay,
 				mode->vdisplay);
@@ -1149,7 +1155,7 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
 		if (success) {
 			struct intel_sdvo_dtd input_dtd;
 
-			intel_sdvo_get_preferred_input_timing(output,
+			intel_sdvo_get_preferred_input_timing(intel_encoder,
 							      &input_dtd);
 			intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd);
 			dev_priv->sdvo_flags = input_dtd.part2.sdvo_flags;
@@ -1181,8 +1187,8 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc = encoder->crtc;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct intel_output *output = enc_to_intel_output(encoder);
-	struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
 	u32 sdvox = 0;
 	int sdvo_pixel_multiply;
 	struct intel_sdvo_in_out_map in_out;
@@ -1201,12 +1207,12 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
 	in_out.in0 = sdvo_priv->controlled_output;
 	in_out.in1 = 0;
 
-	intel_sdvo_write_cmd(output, SDVO_CMD_SET_IN_OUT_MAP,
+	intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_IN_OUT_MAP,
 			     &in_out, sizeof(in_out));
-	status = intel_sdvo_read_response(output, NULL, 0);
+	status = intel_sdvo_read_response(intel_encoder, NULL, 0);
 
 	if (sdvo_priv->is_hdmi) {
-		intel_sdvo_set_avi_infoframe(output, mode);
+		intel_sdvo_set_avi_infoframe(intel_encoder, mode);
 		sdvox |= SDVO_AUDIO_ENABLE;
 	}
 
@@ -1223,16 +1229,16 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
 	 */
 	if (!sdvo_priv->is_tv && !sdvo_priv->is_lvds) {
 		/* Set the output timing to the screen */
-		intel_sdvo_set_target_output(output,
+		intel_sdvo_set_target_output(intel_encoder,
 					     sdvo_priv->controlled_output);
-		intel_sdvo_set_output_timing(output, &input_dtd);
+		intel_sdvo_set_output_timing(intel_encoder, &input_dtd);
 	}
 
 	/* Set the input timing to the screen. Assume always input 0. */
-	intel_sdvo_set_target_input(output, true, false);
+	intel_sdvo_set_target_input(intel_encoder, true, false);
 
 	if (sdvo_priv->is_tv)
-		intel_sdvo_set_tv_format(output);
+		intel_sdvo_set_tv_format(intel_encoder);
 
 	/* We would like to use intel_sdvo_create_preferred_input_timing() to
 	 * provide the device with a timing it can support, if it supports that
@@ -1240,29 +1246,29 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
 	 * output the preferred timing, and we don't support that currently.
 	 */
 #if 0
-	success = intel_sdvo_create_preferred_input_timing(output, clock,
+	success = intel_sdvo_create_preferred_input_timing(encoder, clock,
 							   width, height);
 	if (success) {
 		struct intel_sdvo_dtd *input_dtd;
 
-		intel_sdvo_get_preferred_input_timing(output, &input_dtd);
-		intel_sdvo_set_input_timing(output, &input_dtd);
+		intel_sdvo_get_preferred_input_timing(encoder, &input_dtd);
+		intel_sdvo_set_input_timing(encoder, &input_dtd);
 	}
 #else
-	intel_sdvo_set_input_timing(output, &input_dtd);
+	intel_sdvo_set_input_timing(intel_encoder, &input_dtd);
 #endif
 
 	switch (intel_sdvo_get_pixel_multiplier(mode)) {
 	case 1:
-		intel_sdvo_set_clock_rate_mult(output,
+		intel_sdvo_set_clock_rate_mult(intel_encoder,
 					       SDVO_CLOCK_RATE_MULT_1X);
 		break;
 	case 2:
-		intel_sdvo_set_clock_rate_mult(output,
+		intel_sdvo_set_clock_rate_mult(intel_encoder,
 					       SDVO_CLOCK_RATE_MULT_2X);
 		break;
 	case 4:
-		intel_sdvo_set_clock_rate_mult(output,
+		intel_sdvo_set_clock_rate_mult(intel_encoder,
 					       SDVO_CLOCK_RATE_MULT_4X);
 		break;
 	}
@@ -1273,8 +1279,8 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
 			SDVO_VSYNC_ACTIVE_HIGH |
 			SDVO_HSYNC_ACTIVE_HIGH;
 	} else {
-		sdvox |= I915_READ(sdvo_priv->output_device);
-		switch (sdvo_priv->output_device) {
+		sdvox |= I915_READ(sdvo_priv->sdvo_reg);
+		switch (sdvo_priv->sdvo_reg) {
 		case SDVOB:
 			sdvox &= SDVOB_PRESERVE_MASK;
 			break;
@@ -1298,26 +1304,26 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
 
 	if (sdvo_priv->sdvo_flags & SDVO_NEED_TO_STALL)
 		sdvox |= SDVO_STALL_SELECT;
-	intel_sdvo_write_sdvox(output, sdvox);
+	intel_sdvo_write_sdvox(intel_encoder, sdvox);
 }
 
 static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
 {
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_output *intel_output = enc_to_intel_output(encoder);
-	struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
 	u32 temp;
 
 	if (mode != DRM_MODE_DPMS_ON) {
-		intel_sdvo_set_active_outputs(intel_output, 0);
+		intel_sdvo_set_active_outputs(intel_encoder, 0);
 		if (0)
-			intel_sdvo_set_encoder_power_state(intel_output, mode);
+			intel_sdvo_set_encoder_power_state(intel_encoder, mode);
 
 		if (mode == DRM_MODE_DPMS_OFF) {
-			temp = I915_READ(sdvo_priv->output_device);
+			temp = I915_READ(sdvo_priv->sdvo_reg);
 			if ((temp & SDVO_ENABLE) != 0) {
-				intel_sdvo_write_sdvox(intel_output, temp & ~SDVO_ENABLE);
+				intel_sdvo_write_sdvox(intel_encoder, temp & ~SDVO_ENABLE);
 			}
 		}
 	} else {
@@ -1325,13 +1331,13 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
 		int i;
 		u8 status;
 
-		temp = I915_READ(sdvo_priv->output_device);
+		temp = I915_READ(sdvo_priv->sdvo_reg);
 		if ((temp & SDVO_ENABLE) == 0)
-			intel_sdvo_write_sdvox(intel_output, temp | SDVO_ENABLE);
+			intel_sdvo_write_sdvox(intel_encoder, temp | SDVO_ENABLE);
 		for (i = 0; i < 2; i++)
 			intel_wait_for_vblank(dev);
 
-		status = intel_sdvo_get_trained_inputs(intel_output, &input1,
+		status = intel_sdvo_get_trained_inputs(intel_encoder, &input1,
 						       &input2);
 
 
@@ -1345,8 +1351,8 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
 		}
 
 		if (0)
-			intel_sdvo_set_encoder_power_state(intel_output, mode);
-		intel_sdvo_set_active_outputs(intel_output, sdvo_priv->controlled_output);
+			intel_sdvo_set_encoder_power_state(intel_encoder, mode);
+		intel_sdvo_set_active_outputs(intel_encoder, sdvo_priv->controlled_output);
 	}
 	return;
 }
@@ -1355,22 +1361,22 @@ static void intel_sdvo_save(struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_output *intel_output = to_intel_output(connector);
-	struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
 	int o;
 
-	sdvo_priv->save_sdvo_mult = intel_sdvo_get_clock_rate_mult(intel_output);
-	intel_sdvo_get_active_outputs(intel_output, &sdvo_priv->save_active_outputs);
+	sdvo_priv->save_sdvo_mult = intel_sdvo_get_clock_rate_mult(intel_encoder);
+	intel_sdvo_get_active_outputs(intel_encoder, &sdvo_priv->save_active_outputs);
 
 	if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
-		intel_sdvo_set_target_input(intel_output, true, false);
-		intel_sdvo_get_input_timing(intel_output,
+		intel_sdvo_set_target_input(intel_encoder, true, false);
+		intel_sdvo_get_input_timing(intel_encoder,
 					    &sdvo_priv->save_input_dtd_1);
 	}
 
 	if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
-		intel_sdvo_set_target_input(intel_output, false, true);
-		intel_sdvo_get_input_timing(intel_output,
+		intel_sdvo_set_target_input(intel_encoder, false, true);
+		intel_sdvo_get_input_timing(intel_encoder,
 					    &sdvo_priv->save_input_dtd_2);
 	}
 
@@ -1379,8 +1385,8 @@ static void intel_sdvo_save(struct drm_connector *connector)
 		u16 this_output = (1 << o);
 		if (sdvo_priv->caps.output_flags & this_output)
 		{
-			intel_sdvo_set_target_output(intel_output, this_output);
-			intel_sdvo_get_output_timing(intel_output,
+			intel_sdvo_set_target_output(intel_encoder, this_output);
+			intel_sdvo_get_output_timing(intel_encoder,
 						     &sdvo_priv->save_output_dtd[o]);
 		}
 	}
@@ -1388,66 +1394,66 @@ static void intel_sdvo_save(struct drm_connector *connector)
 		/* XXX: Save TV format/enhancements. */
 	}
 
-	sdvo_priv->save_SDVOX = I915_READ(sdvo_priv->output_device);
+	sdvo_priv->save_SDVOX = I915_READ(sdvo_priv->sdvo_reg);
 }
 
 static void intel_sdvo_restore(struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
-	struct intel_output *intel_output = to_intel_output(connector);
-	struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
 	int o;
 	int i;
 	bool input1, input2;
 	u8 status;
 
-	intel_sdvo_set_active_outputs(intel_output, 0);
+	intel_sdvo_set_active_outputs(intel_encoder, 0);
 
 	for (o = SDVO_OUTPUT_FIRST; o <= SDVO_OUTPUT_LAST; o++)
 	{
 		u16 this_output = (1 << o);
 		if (sdvo_priv->caps.output_flags & this_output) {
-			intel_sdvo_set_target_output(intel_output, this_output);
-			intel_sdvo_set_output_timing(intel_output, &sdvo_priv->save_output_dtd[o]);
+			intel_sdvo_set_target_output(intel_encoder, this_output);
+			intel_sdvo_set_output_timing(intel_encoder, &sdvo_priv->save_output_dtd[o]);
 		}
 	}
 
 	if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
-		intel_sdvo_set_target_input(intel_output, true, false);
-		intel_sdvo_set_input_timing(intel_output, &sdvo_priv->save_input_dtd_1);
+		intel_sdvo_set_target_input(intel_encoder, true, false);
+		intel_sdvo_set_input_timing(intel_encoder, &sdvo_priv->save_input_dtd_1);
 	}
 
 	if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
-		intel_sdvo_set_target_input(intel_output, false, true);
-		intel_sdvo_set_input_timing(intel_output, &sdvo_priv->save_input_dtd_2);
+		intel_sdvo_set_target_input(intel_encoder, false, true);
+		intel_sdvo_set_input_timing(intel_encoder, &sdvo_priv->save_input_dtd_2);
 	}
 
-	intel_sdvo_set_clock_rate_mult(intel_output, sdvo_priv->save_sdvo_mult);
+	intel_sdvo_set_clock_rate_mult(intel_encoder, sdvo_priv->save_sdvo_mult);
 
 	if (sdvo_priv->is_tv) {
 		/* XXX: Restore TV format/enhancements. */
 	}
 
-	intel_sdvo_write_sdvox(intel_output, sdvo_priv->save_SDVOX);
+	intel_sdvo_write_sdvox(intel_encoder, sdvo_priv->save_SDVOX);
 
 	if (sdvo_priv->save_SDVOX & SDVO_ENABLE)
 	{
 		for (i = 0; i < 2; i++)
 			intel_wait_for_vblank(dev);
-		status = intel_sdvo_get_trained_inputs(intel_output, &input1, &input2);
+		status = intel_sdvo_get_trained_inputs(intel_encoder, &input1, &input2);
 		if (status == SDVO_CMD_STATUS_SUCCESS && !input1)
 			DRM_DEBUG_KMS("First %s output reported failure to "
 					"sync\n", SDVO_NAME(sdvo_priv));
 	}
 
-	intel_sdvo_set_active_outputs(intel_output, sdvo_priv->save_active_outputs);
+	intel_sdvo_set_active_outputs(intel_encoder, sdvo_priv->save_active_outputs);
 }
 
 static int intel_sdvo_mode_valid(struct drm_connector *connector,
 				 struct drm_display_mode *mode)
 {
-	struct intel_output *intel_output = to_intel_output(connector);
-	struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
 
 	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
 		return MODE_NO_DBLESCAN;
@@ -1472,12 +1478,12 @@ static int intel_sdvo_mode_valid(struct drm_connector *connector,
 	return MODE_OK;
 }
 
-static bool intel_sdvo_get_capabilities(struct intel_output *intel_output, struct intel_sdvo_caps *caps)
+static bool intel_sdvo_get_capabilities(struct intel_encoder *intel_encoder, struct intel_sdvo_caps *caps)
 {
 	u8 status;
 
-	intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_DEVICE_CAPS, NULL, 0);
-	status = intel_sdvo_read_response(intel_output, caps, sizeof(*caps));
+	intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_DEVICE_CAPS, NULL, 0);
+	status = intel_sdvo_read_response(intel_encoder, caps, sizeof(*caps));
 	if (status != SDVO_CMD_STATUS_SUCCESS)
 		return false;
 
@@ -1487,22 +1493,22 @@ struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB)
 struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB)
 {
 	struct drm_connector *connector = NULL;
-	struct intel_output *iout = NULL;
+	struct intel_encoder *iout = NULL;
 	struct intel_sdvo_priv *sdvo;
 
 	/* find the sdvo connector */
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		iout = to_intel_output(connector);
+		iout = to_intel_encoder(connector);
 
 		if (iout->type != INTEL_OUTPUT_SDVO)
 			continue;
 
 		sdvo = iout->dev_priv;
 
-		if (sdvo->output_device == SDVOB && sdvoB)
+		if (sdvo->sdvo_reg == SDVOB && sdvoB)
 			return connector;
 
-		if (sdvo->output_device == SDVOC && !sdvoB)
+		if (sdvo->sdvo_reg == SDVOC && !sdvoB)
 			return connector;
 
 	}
@@ -1514,16 +1520,16 @@ int intel_sdvo_supports_hotplug(struct drm_connector *connector)
 {
 	u8 response[2];
 	u8 status;
-	struct intel_output *intel_output;
+	struct intel_encoder *intel_encoder;
 	DRM_DEBUG_KMS("\n");
 
 	if (!connector)
 		return 0;
 
-	intel_output = to_intel_output(connector);
+	intel_encoder = to_intel_encoder(connector);
 
-	intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
-	status = intel_sdvo_read_response(intel_output, &response, 2);
+	intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
+	status = intel_sdvo_read_response(intel_encoder, &response, 2);
 
 	if (response[0] !=0)
 		return 1;
@@ -1535,30 +1541,30 @@ void intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
 {
 	u8 response[2];
 	u8 status;
-	struct intel_output *intel_output = to_intel_output(connector);
+	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
 
-	intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
-	intel_sdvo_read_response(intel_output, &response, 2);
+	intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
+	intel_sdvo_read_response(intel_encoder, &response, 2);
 
 	if (on) {
-		intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
-		status = intel_sdvo_read_response(intel_output, &response, 2);
+		intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
+		status = intel_sdvo_read_response(intel_encoder, &response, 2);
 
-		intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
+		intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
 	} else {
 		response[0] = 0;
 		response[1] = 0;
-		intel_sdvo_write_cmd(intel_output, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
+		intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
 	}
 
-	intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
-	intel_sdvo_read_response(intel_output, &response, 2);
+	intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
+	intel_sdvo_read_response(intel_encoder, &response, 2);
 }
 
 static bool
-intel_sdvo_multifunc_encoder(struct intel_output *intel_output)
+intel_sdvo_multifunc_encoder(struct intel_encoder *intel_encoder)
 {
-	struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
 	int caps = 0;
 
 	if (sdvo_priv->caps.output_flags &
@@ -1592,11 +1598,11 @@ static struct drm_connector *
 intel_find_analog_connector(struct drm_device *dev)
 {
 	struct drm_connector *connector;
-	struct intel_output *intel_output;
+	struct intel_encoder *intel_encoder;
 
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		intel_output = to_intel_output(connector);
-		if (intel_output->type == INTEL_OUTPUT_ANALOG)
+		intel_encoder = to_intel_encoder(connector);
+		if (intel_encoder->type == INTEL_OUTPUT_ANALOG)
 			return connector;
 	}
 	return NULL;
@@ -1621,16 +1627,16 @@ intel_analog_is_connected(struct drm_device *dev)
 enum drm_connector_status
 intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response)
 {
-	struct intel_output *intel_output = to_intel_output(connector);
-	struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
 	enum drm_connector_status status = connector_status_connected;
 	struct edid *edid = NULL;
 
-	edid = drm_get_edid(&intel_output->base,
-			    intel_output->ddc_bus);
+	edid = drm_get_edid(&intel_encoder->base,
+			    intel_encoder->ddc_bus);
 
 	/* This is only applied to SDVO cards with multiple outputs */
-	if (edid == NULL && intel_sdvo_multifunc_encoder(intel_output)) {
+	if (edid == NULL && intel_sdvo_multifunc_encoder(intel_encoder)) {
 		uint8_t saved_ddc, temp_ddc;
 		saved_ddc = sdvo_priv->ddc_bus;
 		temp_ddc = sdvo_priv->ddc_bus >> 1;
@@ -1640,8 +1646,8 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response)
 		 */
 		while(temp_ddc > 1) {
 			sdvo_priv->ddc_bus = temp_ddc;
-			edid = drm_get_edid(&intel_output->base,
-					    intel_output->ddc_bus);
+			edid = drm_get_edid(&intel_encoder->base,
+					    intel_encoder->ddc_bus);
 			if (edid) {
 				/*
 				 * When we can get the EDID, maybe it is the
@@ -1660,8 +1666,8 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response)
 	 */
 	if (edid == NULL &&
 	    sdvo_priv->analog_ddc_bus &&
-	    !intel_analog_is_connected(intel_output->base.dev))
-		edid = drm_get_edid(&intel_output->base,
+	    !intel_analog_is_connected(intel_encoder->base.dev))
+		edid = drm_get_edid(&intel_encoder->base,
 				    sdvo_priv->analog_ddc_bus);
 	if (edid != NULL) {
 		/* Don't report the output as connected if it's a DVI-I
@@ -1676,7 +1682,7 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector, u16 response)
 	}
 
 	kfree(edid);
-	intel_output->base.display_info.raw_edid = NULL;
+	intel_encoder->base.display_info.raw_edid = NULL;
 
 	} else if (response & (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1))
 		status = connector_status_disconnected;
@@ -1688,16 +1694,16 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect
 {
 	uint16_t response;
 	u8 status;
-	struct intel_output *intel_output = to_intel_output(connector);
-	struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
 
-	intel_sdvo_write_cmd(intel_output,
+	intel_sdvo_write_cmd(intel_encoder,
 			     SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0);
 	if (sdvo_priv->is_tv) {
 		/* add 30ms delay when the output type is SDVO-TV */
 		mdelay(30);
 	}
-	status = intel_sdvo_read_response(intel_output, &response, 2);
+	status = intel_sdvo_read_response(intel_encoder, &response, 2);
 
 	DRM_DEBUG_KMS("SDVO response %d %d\n", response & 0xff, response >> 8);
 
@@ -1707,10 +1713,10 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect
 	if (response == 0)
 		return connector_status_disconnected;
 
-	if (intel_sdvo_multifunc_encoder(intel_output) &&
+	if (intel_sdvo_multifunc_encoder(intel_encoder) &&
 	    sdvo_priv->attached_output != response) {
 		if (sdvo_priv->controlled_output != response &&
-		    intel_sdvo_output_setup(intel_output, response) != true)
+		    intel_sdvo_output_setup(intel_encoder, response) != true)
 			return connector_status_unknown;
 		sdvo_priv->attached_output = response;
 	}
@@ -1719,12 +1725,12 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect
 
 static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
 {
-	struct intel_output *intel_output = to_intel_output(connector);
-	struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
 	int num_modes;
 
 	/* set the bus switch and get the modes */
-	num_modes = intel_ddc_get_modes(intel_output);
+	num_modes = intel_ddc_get_modes(intel_encoder);
 
 	/*
 	 * Mac mini hack. On this device, the DVI-I connector shares one DDC
@@ -1734,17 +1740,17 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
 	 */
 	if (num_modes == 0 &&
 	    sdvo_priv->analog_ddc_bus &&
-	    !intel_analog_is_connected(intel_output->base.dev)) {
+	    !intel_analog_is_connected(intel_encoder->base.dev)) {
 		struct i2c_adapter *digital_ddc_bus;
 
 		/* Switch to the analog ddc bus and try that
 		 */
-		digital_ddc_bus = intel_output->ddc_bus;
-		intel_output->ddc_bus = sdvo_priv->analog_ddc_bus;
+		digital_ddc_bus = intel_encoder->ddc_bus;
+		intel_encoder->ddc_bus = sdvo_priv->analog_ddc_bus;
 
-		(void) intel_ddc_get_modes(intel_output);
+		(void) intel_ddc_get_modes(intel_encoder);
 
-		intel_output->ddc_bus = digital_ddc_bus;
+		intel_encoder->ddc_bus = digital_ddc_bus;
 	}
 }
 
@@ -1815,7 +1821,7 @@ struct drm_display_mode sdvo_tv_modes[] = {
 
 static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
 {
-	struct intel_output *output = to_intel_output(connector);
+	struct intel_encoder *output = to_intel_encoder(connector);
 	struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
 	struct intel_sdvo_sdtv_resolution_request tv_res;
 	uint32_t reply = 0, format_map = 0;
@@ -1857,9 +1863,9 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
 
 static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
 {
-	struct intel_output *intel_output = to_intel_output(connector);
+	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
 	struct drm_i915_private *dev_priv = connector->dev->dev_private;
-	struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
 	struct drm_display_mode *newmode;
 
 	/*
@@ -1867,7 +1873,7 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
 	 * Assume that the preferred modes are
	 * arranged in priority order.
	 */
-	intel_ddc_get_modes(intel_output);
+	intel_ddc_get_modes(intel_encoder);
 	if (list_empty(&connector->probed_modes) == false)
 		goto end;
 
@@ -1896,7 +1902,7 @@ end:
 
 static int intel_sdvo_get_modes(struct drm_connector *connector)
 {
-	struct intel_output *output = to_intel_output(connector);
+	struct intel_encoder *output = to_intel_encoder(connector);
 	struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
 
 	if (sdvo_priv->is_tv)
@@ -1914,8 +1920,8 @@ static int intel_sdvo_get_modes(struct drm_connector *connector)
 static
 void intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
 {
-	struct intel_output *intel_output = to_intel_output(connector);
-	struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
 	struct drm_device *dev = connector->dev;
 
 	if (sdvo_priv->is_tv) {
@@ -1952,13 +1958,13 @@ void intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
 
 static void intel_sdvo_destroy(struct drm_connector *connector)
 {
-	struct intel_output *intel_output = to_intel_output(connector);
-	struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
 
-	if (intel_output->i2c_bus)
-		intel_i2c_destroy(intel_output->i2c_bus);
-	if (intel_output->ddc_bus)
-		intel_i2c_destroy(intel_output->ddc_bus);
+	if (intel_encoder->i2c_bus)
+		intel_i2c_destroy(intel_encoder->i2c_bus);
+	if (intel_encoder->ddc_bus)
+		intel_i2c_destroy(intel_encoder->ddc_bus);
 	if (sdvo_priv->analog_ddc_bus)
 		intel_i2c_destroy(sdvo_priv->analog_ddc_bus);
 
@@ -1976,7 +1982,7 @@ static void intel_sdvo_destroy(struct drm_connector *connector)
 	drm_sysfs_connector_remove(connector);
 	drm_connector_cleanup(connector);
 
-	kfree(intel_output);
+	kfree(intel_encoder);
 }
 
 static int
@@ -1984,9 +1990,9 @@ intel_sdvo_set_property(struct drm_connector *connector,
 			struct drm_property *property,
 			uint64_t val)
 {
-	struct intel_output *intel_output = to_intel_output(connector);
-	struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
-	struct drm_encoder *encoder = &intel_output->enc;
+	struct intel_encoder *intel_encoder = to_intel_encoder(connector);
+	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
+	struct drm_encoder *encoder = &intel_encoder->enc;
 	struct drm_crtc *crtc = encoder->crtc;
 	int ret = 0;
 	bool changed = false;
@@ -2094,8 +2100,8 @@ intel_sdvo_set_property(struct drm_connector *connector,
 			sdvo_priv->cur_brightness = temp_value;
 		}
 		if (cmd) {
-			intel_sdvo_write_cmd(intel_output, cmd, &temp_value, 2);
-			status = intel_sdvo_read_response(intel_output,
+			intel_sdvo_write_cmd(intel_encoder, cmd, &temp_value, 2);
+			status = intel_sdvo_read_response(intel_encoder,
 							  NULL, 0);
 			if (status != SDVO_CMD_STATUS_SUCCESS) {
 				DRM_DEBUG_KMS("Incorrect SDVO command \n");
@@ -2190,7 +2196,7 @@ intel_sdvo_select_ddc_bus(struct intel_sdvo_priv *dev_priv)
 }
 
 static bool
-intel_sdvo_get_digital_encoding_mode(struct intel_output *output)
+intel_sdvo_get_digital_encoding_mode(struct intel_encoder *output)
 {
 	struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
 	uint8_t status;
@@ -2204,42 +2210,42 @@ intel_sdvo_get_digital_encoding_mode(struct intel_output *output)
 	return true;
 }
 
-static struct intel_output *
-intel_sdvo_chan_to_intel_output(struct intel_i2c_chan *chan)
+static struct intel_encoder *
+intel_sdvo_chan_to_intel_encoder(struct intel_i2c_chan *chan)
 {
 	struct drm_device *dev = chan->drm_dev;
 	struct drm_connector *connector;
-	struct intel_output *intel_output = NULL;
+	struct intel_encoder *intel_encoder = NULL;
 
 	list_for_each_entry(connector,
 			&dev->mode_config.connector_list, head) {
-		if (to_intel_output(connector)->ddc_bus == &chan->adapter) {
-			intel_output = to_intel_output(connector);
+		if (to_intel_encoder(connector)->ddc_bus == &chan->adapter) {
+			intel_encoder = to_intel_encoder(connector);
 			break;
 		}
 	}
-	return intel_output;
+	return intel_encoder;
 }
 
 static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap,
 				  struct i2c_msg msgs[], int num)
 {
-	struct intel_output *intel_output;
+	struct intel_encoder *intel_encoder;
 	struct intel_sdvo_priv *sdvo_priv;
 	struct i2c_algo_bit_data *algo_data;
 	const struct i2c_algorithm *algo;
 
 	algo_data = (struct i2c_algo_bit_data *)i2c_adap->algo_data;
-	intel_output =
-		intel_sdvo_chan_to_intel_output(
+	intel_encoder =
+		intel_sdvo_chan_to_intel_encoder(
 				(struct intel_i2c_chan *)(algo_data->data));
-	if (intel_output == NULL)
+	if (intel_encoder == NULL)
 		return -EINVAL;
 
-	sdvo_priv = intel_output->dev_priv;
-	algo = intel_output->i2c_bus->algo;
+	sdvo_priv = intel_encoder->dev_priv;
+	algo = intel_encoder->i2c_bus->algo;
 
-	intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus);
+	intel_sdvo_set_control_bus_switch(intel_encoder, sdvo_priv->ddc_bus);
 	return algo->master_xfer(i2c_adap, msgs, num);
 }
 
@@ -2248,12 +2254,12 @@ static struct i2c_algorithm intel_sdvo_i2c_bit_algo = {
 };
 
 static u8
-intel_sdvo_get_slave_addr(struct drm_device *dev, int output_device)
+intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct sdvo_device_mapping *my_mapping, *other_mapping;
 
-	if (output_device == SDVOB) {
+	if (sdvo_reg == SDVOB) {
 		my_mapping = &dev_priv->sdvo_mappings[0];
 		other_mapping = &dev_priv->sdvo_mappings[1];
 	} else {
@@ -2278,7 +2284,7 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int output_device)
 	/* No SDVO device info is found for another DVO port,
 	 * so use mapping assumption we had before BIOS parsing.
 	 */
-	if (output_device == SDVOB)
+	if (sdvo_reg == SDVOB)
 		return 0x70;
 	else
 		return 0x72;
@@ -2304,15 +2310,15 @@ static struct dmi_system_id intel_sdvo_bad_tv[] = {
 };
 
 static bool
-intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
+intel_sdvo_output_setup(struct intel_encoder *intel_encoder, uint16_t flags)
 {
-	struct drm_connector *connector = &intel_output->base;
-	struct drm_encoder *encoder = &intel_output->enc;
-	struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+	struct drm_connector *connector = &intel_encoder->base;
+	struct drm_encoder *encoder = &intel_encoder->enc;
+	struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
 	bool ret = true, registered = false;
 
 	sdvo_priv->is_tv = false;
-	intel_output->needs_tv_clock = false;
+	intel_encoder->needs_tv_clock = false;
 	sdvo_priv->is_lvds = false;
 
 	if (device_is_registered(&connector->kdev)) {
@@ -2330,16 +2336,16 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
 		encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
 		connector->connector_type = DRM_MODE_CONNECTOR_DVID;
 
-		if (intel_sdvo_get_supp_encode(intel_output,
+		if (intel_sdvo_get_supp_encode(intel_encoder,
 					       &sdvo_priv->encode) &&
-		    intel_sdvo_get_digital_encoding_mode(intel_output) &&
+		    intel_sdvo_get_digital_encoding_mode(intel_encoder) &&
 		    sdvo_priv->is_hdmi) {
 			/* enable hdmi encoding mode if supported */
-			intel_sdvo_set_encode(intel_output, SDVO_ENCODE_HDMI);
-			intel_sdvo_set_colorimetry(intel_output,
+			intel_sdvo_set_encode(intel_encoder, SDVO_ENCODE_HDMI);
+			intel_sdvo_set_colorimetry(intel_encoder,
 						   SDVO_COLORIMETRY_RGB256);
 			connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
-			intel_output->clone_mask =
+			intel_encoder->clone_mask =
 				(1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
 				(1 << INTEL_ANALOG_CLONE_BIT);
 		}
@@ -2350,21 +2356,21 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
 		encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
 		connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
 		sdvo_priv->is_tv = true;
-		intel_output->needs_tv_clock = true;
-		intel_output->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
+		intel_encoder->needs_tv_clock = true;
+		intel_encoder->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
2355 } else if (flags & SDVO_OUTPUT_RGB0) { 2361 } else if (flags & SDVO_OUTPUT_RGB0) {
2356 2362
2357 sdvo_priv->controlled_output = SDVO_OUTPUT_RGB0; 2363 sdvo_priv->controlled_output = SDVO_OUTPUT_RGB0;
2358 encoder->encoder_type = DRM_MODE_ENCODER_DAC; 2364 encoder->encoder_type = DRM_MODE_ENCODER_DAC;
2359 connector->connector_type = DRM_MODE_CONNECTOR_VGA; 2365 connector->connector_type = DRM_MODE_CONNECTOR_VGA;
2360 intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | 2366 intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
2361 (1 << INTEL_ANALOG_CLONE_BIT); 2367 (1 << INTEL_ANALOG_CLONE_BIT);
2362 } else if (flags & SDVO_OUTPUT_RGB1) { 2368 } else if (flags & SDVO_OUTPUT_RGB1) {
2363 2369
2364 sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1; 2370 sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1;
2365 encoder->encoder_type = DRM_MODE_ENCODER_DAC; 2371 encoder->encoder_type = DRM_MODE_ENCODER_DAC;
2366 connector->connector_type = DRM_MODE_CONNECTOR_VGA; 2372 connector->connector_type = DRM_MODE_CONNECTOR_VGA;
2367 intel_output->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | 2373 intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
2368 (1 << INTEL_ANALOG_CLONE_BIT); 2374 (1 << INTEL_ANALOG_CLONE_BIT);
2369 } else if (flags & SDVO_OUTPUT_CVBS0) { 2375 } else if (flags & SDVO_OUTPUT_CVBS0) {
2370 2376
@@ -2372,15 +2378,15 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
2372 encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; 2378 encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
2373 connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; 2379 connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
2374 sdvo_priv->is_tv = true; 2380 sdvo_priv->is_tv = true;
2375 intel_output->needs_tv_clock = true; 2381 intel_encoder->needs_tv_clock = true;
2376 intel_output->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT; 2382 intel_encoder->clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
2377 } else if (flags & SDVO_OUTPUT_LVDS0) { 2383 } else if (flags & SDVO_OUTPUT_LVDS0) {
2378 2384
2379 sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0; 2385 sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0;
2380 encoder->encoder_type = DRM_MODE_ENCODER_LVDS; 2386 encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
2381 connector->connector_type = DRM_MODE_CONNECTOR_LVDS; 2387 connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
2382 sdvo_priv->is_lvds = true; 2388 sdvo_priv->is_lvds = true;
2383 intel_output->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) | 2389 intel_encoder->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) |
2384 (1 << INTEL_SDVO_LVDS_CLONE_BIT); 2390 (1 << INTEL_SDVO_LVDS_CLONE_BIT);
2385 } else if (flags & SDVO_OUTPUT_LVDS1) { 2391 } else if (flags & SDVO_OUTPUT_LVDS1) {
2386 2392
@@ -2388,7 +2394,7 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
2388 encoder->encoder_type = DRM_MODE_ENCODER_LVDS; 2394 encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
2389 connector->connector_type = DRM_MODE_CONNECTOR_LVDS; 2395 connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
2390 sdvo_priv->is_lvds = true; 2396 sdvo_priv->is_lvds = true;
2391 intel_output->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) | 2397 intel_encoder->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT) |
2392 (1 << INTEL_SDVO_LVDS_CLONE_BIT); 2398 (1 << INTEL_SDVO_LVDS_CLONE_BIT);
2393 } else { 2399 } else {
2394 2400
@@ -2401,7 +2407,7 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
2401 bytes[0], bytes[1]); 2407 bytes[0], bytes[1]);
2402 ret = false; 2408 ret = false;
2403 } 2409 }
2404 intel_output->crtc_mask = (1 << 0) | (1 << 1); 2410 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
2405 2411
2406 if (ret && registered) 2412 if (ret && registered)
2407 ret = drm_sysfs_connector_add(connector) == 0 ? true : false; 2413 ret = drm_sysfs_connector_add(connector) == 0 ? true : false;
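[Editor's note] intel_sdvo_output_setup() above assigns one clone_mask per detected output type and a crtc_mask covering both pipes. A small sketch of how those bitmasks compose is below; the *_BIT values are illustrative placeholders, not the driver's real numbering in intel_drv.h.

#include <stdio.h>

#define INTEL_ANALOG_CLONE_BIT       0  /* assumed value, for illustration */
#define INTEL_SDVO_NON_TV_CLONE_BIT  1  /* assumed value, for illustration */
#define INTEL_SDVO_TV_CLONE_BIT      2  /* assumed value, for illustration */
#define INTEL_SDVO_LVDS_CLONE_BIT    3  /* assumed value, for illustration */

int main(void)
{
	unsigned int tmds_clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
				       (1 << INTEL_ANALOG_CLONE_BIT);
	unsigned int tv_clone_mask   = 1 << INTEL_SDVO_TV_CLONE_BIT;
	unsigned int crtc_mask       = (1 << 0) | (1 << 1);  /* pipes A and B */

	printf("TMDS clone mask 0x%x, TV clone mask 0x%x, crtc mask 0x%x\n",
	       tmds_clone_mask, tv_clone_mask, crtc_mask);
	printf("TMDS and TV outputs may clone each other: %s\n",
	       (tmds_clone_mask & tv_clone_mask) ? "yes" : "no");
	return 0;
}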
@@ -2413,18 +2419,18 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
2413 2419
2414static void intel_sdvo_tv_create_property(struct drm_connector *connector) 2420static void intel_sdvo_tv_create_property(struct drm_connector *connector)
2415{ 2421{
2416 struct intel_output *intel_output = to_intel_output(connector); 2422 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
2417 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 2423 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
2418 struct intel_sdvo_tv_format format; 2424 struct intel_sdvo_tv_format format;
2419 uint32_t format_map, i; 2425 uint32_t format_map, i;
2420 uint8_t status; 2426 uint8_t status;
2421 2427
2422 intel_sdvo_set_target_output(intel_output, 2428 intel_sdvo_set_target_output(intel_encoder,
2423 sdvo_priv->controlled_output); 2429 sdvo_priv->controlled_output);
2424 2430
2425 intel_sdvo_write_cmd(intel_output, 2431 intel_sdvo_write_cmd(intel_encoder,
2426 SDVO_CMD_GET_SUPPORTED_TV_FORMATS, NULL, 0); 2432 SDVO_CMD_GET_SUPPORTED_TV_FORMATS, NULL, 0);
2427 status = intel_sdvo_read_response(intel_output, 2433 status = intel_sdvo_read_response(intel_encoder,
2428 &format, sizeof(format)); 2434 &format, sizeof(format));
2429 if (status != SDVO_CMD_STATUS_SUCCESS) 2435 if (status != SDVO_CMD_STATUS_SUCCESS)
2430 return; 2436 return;
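[Editor's note] The write-command / read-response pairing above repeats throughout the TV-format and enhancement-property code that follows: first a GET_MAX_* query (4-byte reply: max and default), then a GET_* query (2-byte reply: current value). A cut-down sketch of that round trip is below; the transport is faked with a lookup table and the opcodes are made up — the real driver goes through intel_sdvo_write_cmd()/intel_sdvo_read_response().

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define CMD_GET_MAX_BRIGHTNESS 0x90  /* illustrative opcode */
#define CMD_GET_BRIGHTNESS     0x91  /* illustrative opcode */

/* pretend SDVO device: 0 stands in for SDVO_CMD_STATUS_SUCCESS */
static int fake_device(uint8_t cmd, void *reply, size_t len)
{
	uint16_t max_brightness[2] = { 255, 0 };  /* max, default */
	uint16_t brightness = 128;

	if (cmd == CMD_GET_MAX_BRIGHTNESS && len == 4)
		memcpy(reply, max_brightness, 4);
	else if (cmd == CMD_GET_BRIGHTNESS && len == 2)
		memcpy(reply, &brightness, 2);
	else
		return -1;
	return 0;
}

int main(void)
{
	uint16_t data_value[2], response;

	if (fake_device(CMD_GET_MAX_BRIGHTNESS, data_value, 4) ||
	    fake_device(CMD_GET_BRIGHTNESS, &response, 2)) {
		fprintf(stderr, "incorrect SDVO response\n");
		return 1;
	}

	printf("brightness %u of max %u (default %u)\n",
	       (unsigned)response, (unsigned)data_value[0], (unsigned)data_value[1]);
	return 0;
}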
@@ -2462,16 +2468,16 @@ static void intel_sdvo_tv_create_property(struct drm_connector *connector)
2462 2468
2463static void intel_sdvo_create_enhance_property(struct drm_connector *connector) 2469static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
2464{ 2470{
2465 struct intel_output *intel_output = to_intel_output(connector); 2471 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
2466 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 2472 struct intel_sdvo_priv *sdvo_priv = intel_encoder->dev_priv;
2467 struct intel_sdvo_enhancements_reply sdvo_data; 2473 struct intel_sdvo_enhancements_reply sdvo_data;
2468 struct drm_device *dev = connector->dev; 2474 struct drm_device *dev = connector->dev;
2469 uint8_t status; 2475 uint8_t status;
2470 uint16_t response, data_value[2]; 2476 uint16_t response, data_value[2];
2471 2477
2472 intel_sdvo_write_cmd(intel_output, SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS, 2478 intel_sdvo_write_cmd(intel_encoder, SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
2473 NULL, 0); 2479 NULL, 0);
2474 status = intel_sdvo_read_response(intel_output, &sdvo_data, 2480 status = intel_sdvo_read_response(intel_encoder, &sdvo_data,
2475 sizeof(sdvo_data)); 2481 sizeof(sdvo_data));
2476 if (status != SDVO_CMD_STATUS_SUCCESS) { 2482 if (status != SDVO_CMD_STATUS_SUCCESS) {
2477 DRM_DEBUG_KMS(" incorrect response is returned\n"); 2483 DRM_DEBUG_KMS(" incorrect response is returned\n");
@@ -2487,18 +2493,18 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
2487 * property 2493 * property
2488 */ 2494 */
2489 if (sdvo_data.overscan_h) { 2495 if (sdvo_data.overscan_h) {
2490 intel_sdvo_write_cmd(intel_output, 2496 intel_sdvo_write_cmd(intel_encoder,
2491 SDVO_CMD_GET_MAX_OVERSCAN_H, NULL, 0); 2497 SDVO_CMD_GET_MAX_OVERSCAN_H, NULL, 0);
2492 status = intel_sdvo_read_response(intel_output, 2498 status = intel_sdvo_read_response(intel_encoder,
2493 &data_value, 4); 2499 &data_value, 4);
2494 if (status != SDVO_CMD_STATUS_SUCCESS) { 2500 if (status != SDVO_CMD_STATUS_SUCCESS) {
2495 DRM_DEBUG_KMS("Incorrect SDVO max " 2501 DRM_DEBUG_KMS("Incorrect SDVO max "
2496 "h_overscan\n"); 2502 "h_overscan\n");
2497 return; 2503 return;
2498 } 2504 }
2499 intel_sdvo_write_cmd(intel_output, 2505 intel_sdvo_write_cmd(intel_encoder,
2500 SDVO_CMD_GET_OVERSCAN_H, NULL, 0); 2506 SDVO_CMD_GET_OVERSCAN_H, NULL, 0);
2501 status = intel_sdvo_read_response(intel_output, 2507 status = intel_sdvo_read_response(intel_encoder,
2502 &response, 2); 2508 &response, 2);
2503 if (status != SDVO_CMD_STATUS_SUCCESS) { 2509 if (status != SDVO_CMD_STATUS_SUCCESS) {
2504 DRM_DEBUG_KMS("Incorrect SDVO h_overscan\n"); 2510 DRM_DEBUG_KMS("Incorrect SDVO h_overscan\n");
@@ -2528,18 +2534,18 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
2528 data_value[0], data_value[1], response); 2534 data_value[0], data_value[1], response);
2529 } 2535 }
2530 if (sdvo_data.overscan_v) { 2536 if (sdvo_data.overscan_v) {
2531 intel_sdvo_write_cmd(intel_output, 2537 intel_sdvo_write_cmd(intel_encoder,
2532 SDVO_CMD_GET_MAX_OVERSCAN_V, NULL, 0); 2538 SDVO_CMD_GET_MAX_OVERSCAN_V, NULL, 0);
2533 status = intel_sdvo_read_response(intel_output, 2539 status = intel_sdvo_read_response(intel_encoder,
2534 &data_value, 4); 2540 &data_value, 4);
2535 if (status != SDVO_CMD_STATUS_SUCCESS) { 2541 if (status != SDVO_CMD_STATUS_SUCCESS) {
2536 DRM_DEBUG_KMS("Incorrect SDVO max " 2542 DRM_DEBUG_KMS("Incorrect SDVO max "
2537 "v_overscan\n"); 2543 "v_overscan\n");
2538 return; 2544 return;
2539 } 2545 }
2540 intel_sdvo_write_cmd(intel_output, 2546 intel_sdvo_write_cmd(intel_encoder,
2541 SDVO_CMD_GET_OVERSCAN_V, NULL, 0); 2547 SDVO_CMD_GET_OVERSCAN_V, NULL, 0);
2542 status = intel_sdvo_read_response(intel_output, 2548 status = intel_sdvo_read_response(intel_encoder,
2543 &response, 2); 2549 &response, 2);
2544 if (status != SDVO_CMD_STATUS_SUCCESS) { 2550 if (status != SDVO_CMD_STATUS_SUCCESS) {
2545 DRM_DEBUG_KMS("Incorrect SDVO v_overscan\n"); 2551 DRM_DEBUG_KMS("Incorrect SDVO v_overscan\n");
@@ -2569,17 +2575,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
2569 data_value[0], data_value[1], response); 2575 data_value[0], data_value[1], response);
2570 } 2576 }
2571 if (sdvo_data.position_h) { 2577 if (sdvo_data.position_h) {
2572 intel_sdvo_write_cmd(intel_output, 2578 intel_sdvo_write_cmd(intel_encoder,
2573 SDVO_CMD_GET_MAX_POSITION_H, NULL, 0); 2579 SDVO_CMD_GET_MAX_POSITION_H, NULL, 0);
2574 status = intel_sdvo_read_response(intel_output, 2580 status = intel_sdvo_read_response(intel_encoder,
2575 &data_value, 4); 2581 &data_value, 4);
2576 if (status != SDVO_CMD_STATUS_SUCCESS) { 2582 if (status != SDVO_CMD_STATUS_SUCCESS) {
2577 DRM_DEBUG_KMS("Incorrect SDVO Max h_pos\n"); 2583 DRM_DEBUG_KMS("Incorrect SDVO Max h_pos\n");
2578 return; 2584 return;
2579 } 2585 }
2580 intel_sdvo_write_cmd(intel_output, 2586 intel_sdvo_write_cmd(intel_encoder,
2581 SDVO_CMD_GET_POSITION_H, NULL, 0); 2587 SDVO_CMD_GET_POSITION_H, NULL, 0);
2582 status = intel_sdvo_read_response(intel_output, 2588 status = intel_sdvo_read_response(intel_encoder,
2583 &response, 2); 2589 &response, 2);
2584 if (status != SDVO_CMD_STATUS_SUCCESS) { 2590 if (status != SDVO_CMD_STATUS_SUCCESS) {
2585 DRM_DEBUG_KMS("Incorrect SDVO get h_postion\n"); 2591 DRM_DEBUG_KMS("Incorrect SDVO get h_postion\n");
@@ -2600,17 +2606,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
2600 data_value[0], data_value[1], response); 2606 data_value[0], data_value[1], response);
2601 } 2607 }
2602 if (sdvo_data.position_v) { 2608 if (sdvo_data.position_v) {
2603 intel_sdvo_write_cmd(intel_output, 2609 intel_sdvo_write_cmd(intel_encoder,
2604 SDVO_CMD_GET_MAX_POSITION_V, NULL, 0); 2610 SDVO_CMD_GET_MAX_POSITION_V, NULL, 0);
2605 status = intel_sdvo_read_response(intel_output, 2611 status = intel_sdvo_read_response(intel_encoder,
2606 &data_value, 4); 2612 &data_value, 4);
2607 if (status != SDVO_CMD_STATUS_SUCCESS) { 2613 if (status != SDVO_CMD_STATUS_SUCCESS) {
2608 DRM_DEBUG_KMS("Incorrect SDVO Max v_pos\n"); 2614 DRM_DEBUG_KMS("Incorrect SDVO Max v_pos\n");
2609 return; 2615 return;
2610 } 2616 }
2611 intel_sdvo_write_cmd(intel_output, 2617 intel_sdvo_write_cmd(intel_encoder,
2612 SDVO_CMD_GET_POSITION_V, NULL, 0); 2618 SDVO_CMD_GET_POSITION_V, NULL, 0);
2613 status = intel_sdvo_read_response(intel_output, 2619 status = intel_sdvo_read_response(intel_encoder,
2614 &response, 2); 2620 &response, 2);
2615 if (status != SDVO_CMD_STATUS_SUCCESS) { 2621 if (status != SDVO_CMD_STATUS_SUCCESS) {
2616 DRM_DEBUG_KMS("Incorrect SDVO get v_postion\n"); 2622 DRM_DEBUG_KMS("Incorrect SDVO get v_postion\n");
@@ -2633,17 +2639,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
2633 } 2639 }
2634 if (sdvo_priv->is_tv) { 2640 if (sdvo_priv->is_tv) {
2635 if (sdvo_data.saturation) { 2641 if (sdvo_data.saturation) {
2636 intel_sdvo_write_cmd(intel_output, 2642 intel_sdvo_write_cmd(intel_encoder,
2637 SDVO_CMD_GET_MAX_SATURATION, NULL, 0); 2643 SDVO_CMD_GET_MAX_SATURATION, NULL, 0);
2638 status = intel_sdvo_read_response(intel_output, 2644 status = intel_sdvo_read_response(intel_encoder,
2639 &data_value, 4); 2645 &data_value, 4);
2640 if (status != SDVO_CMD_STATUS_SUCCESS) { 2646 if (status != SDVO_CMD_STATUS_SUCCESS) {
2641 DRM_DEBUG_KMS("Incorrect SDVO Max sat\n"); 2647 DRM_DEBUG_KMS("Incorrect SDVO Max sat\n");
2642 return; 2648 return;
2643 } 2649 }
2644 intel_sdvo_write_cmd(intel_output, 2650 intel_sdvo_write_cmd(intel_encoder,
2645 SDVO_CMD_GET_SATURATION, NULL, 0); 2651 SDVO_CMD_GET_SATURATION, NULL, 0);
2646 status = intel_sdvo_read_response(intel_output, 2652 status = intel_sdvo_read_response(intel_encoder,
2647 &response, 2); 2653 &response, 2);
2648 if (status != SDVO_CMD_STATUS_SUCCESS) { 2654 if (status != SDVO_CMD_STATUS_SUCCESS) {
2649 DRM_DEBUG_KMS("Incorrect SDVO get sat\n"); 2655 DRM_DEBUG_KMS("Incorrect SDVO get sat\n");
@@ -2665,17 +2671,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
2665 data_value[0], data_value[1], response); 2671 data_value[0], data_value[1], response);
2666 } 2672 }
2667 if (sdvo_data.contrast) { 2673 if (sdvo_data.contrast) {
2668 intel_sdvo_write_cmd(intel_output, 2674 intel_sdvo_write_cmd(intel_encoder,
2669 SDVO_CMD_GET_MAX_CONTRAST, NULL, 0); 2675 SDVO_CMD_GET_MAX_CONTRAST, NULL, 0);
2670 status = intel_sdvo_read_response(intel_output, 2676 status = intel_sdvo_read_response(intel_encoder,
2671 &data_value, 4); 2677 &data_value, 4);
2672 if (status != SDVO_CMD_STATUS_SUCCESS) { 2678 if (status != SDVO_CMD_STATUS_SUCCESS) {
2673 DRM_DEBUG_KMS("Incorrect SDVO Max contrast\n"); 2679 DRM_DEBUG_KMS("Incorrect SDVO Max contrast\n");
2674 return; 2680 return;
2675 } 2681 }
2676 intel_sdvo_write_cmd(intel_output, 2682 intel_sdvo_write_cmd(intel_encoder,
2677 SDVO_CMD_GET_CONTRAST, NULL, 0); 2683 SDVO_CMD_GET_CONTRAST, NULL, 0);
2678 status = intel_sdvo_read_response(intel_output, 2684 status = intel_sdvo_read_response(intel_encoder,
2679 &response, 2); 2685 &response, 2);
2680 if (status != SDVO_CMD_STATUS_SUCCESS) { 2686 if (status != SDVO_CMD_STATUS_SUCCESS) {
2681 DRM_DEBUG_KMS("Incorrect SDVO get contrast\n"); 2687 DRM_DEBUG_KMS("Incorrect SDVO get contrast\n");
@@ -2696,17 +2702,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
2696 data_value[0], data_value[1], response); 2702 data_value[0], data_value[1], response);
2697 } 2703 }
2698 if (sdvo_data.hue) { 2704 if (sdvo_data.hue) {
2699 intel_sdvo_write_cmd(intel_output, 2705 intel_sdvo_write_cmd(intel_encoder,
2700 SDVO_CMD_GET_MAX_HUE, NULL, 0); 2706 SDVO_CMD_GET_MAX_HUE, NULL, 0);
2701 status = intel_sdvo_read_response(intel_output, 2707 status = intel_sdvo_read_response(intel_encoder,
2702 &data_value, 4); 2708 &data_value, 4);
2703 if (status != SDVO_CMD_STATUS_SUCCESS) { 2709 if (status != SDVO_CMD_STATUS_SUCCESS) {
2704 DRM_DEBUG_KMS("Incorrect SDVO Max hue\n"); 2710 DRM_DEBUG_KMS("Incorrect SDVO Max hue\n");
2705 return; 2711 return;
2706 } 2712 }
2707 intel_sdvo_write_cmd(intel_output, 2713 intel_sdvo_write_cmd(intel_encoder,
2708 SDVO_CMD_GET_HUE, NULL, 0); 2714 SDVO_CMD_GET_HUE, NULL, 0);
2709 status = intel_sdvo_read_response(intel_output, 2715 status = intel_sdvo_read_response(intel_encoder,
2710 &response, 2); 2716 &response, 2);
2711 if (status != SDVO_CMD_STATUS_SUCCESS) { 2717 if (status != SDVO_CMD_STATUS_SUCCESS) {
2712 DRM_DEBUG_KMS("Incorrect SDVO get hue\n"); 2718 DRM_DEBUG_KMS("Incorrect SDVO get hue\n");
@@ -2729,17 +2735,17 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
2729 } 2735 }
2730 if (sdvo_priv->is_tv || sdvo_priv->is_lvds) { 2736 if (sdvo_priv->is_tv || sdvo_priv->is_lvds) {
2731 if (sdvo_data.brightness) { 2737 if (sdvo_data.brightness) {
2732 intel_sdvo_write_cmd(intel_output, 2738 intel_sdvo_write_cmd(intel_encoder,
2733 SDVO_CMD_GET_MAX_BRIGHTNESS, NULL, 0); 2739 SDVO_CMD_GET_MAX_BRIGHTNESS, NULL, 0);
2734 status = intel_sdvo_read_response(intel_output, 2740 status = intel_sdvo_read_response(intel_encoder,
2735 &data_value, 4); 2741 &data_value, 4);
2736 if (status != SDVO_CMD_STATUS_SUCCESS) { 2742 if (status != SDVO_CMD_STATUS_SUCCESS) {
2737 DRM_DEBUG_KMS("Incorrect SDVO Max bright\n"); 2743 DRM_DEBUG_KMS("Incorrect SDVO Max bright\n");
2738 return; 2744 return;
2739 } 2745 }
2740 intel_sdvo_write_cmd(intel_output, 2746 intel_sdvo_write_cmd(intel_encoder,
2741 SDVO_CMD_GET_BRIGHTNESS, NULL, 0); 2747 SDVO_CMD_GET_BRIGHTNESS, NULL, 0);
2742 status = intel_sdvo_read_response(intel_output, 2748 status = intel_sdvo_read_response(intel_encoder,
2743 &response, 2); 2749 &response, 2);
2744 if (status != SDVO_CMD_STATUS_SUCCESS) { 2750 if (status != SDVO_CMD_STATUS_SUCCESS) {
2745 DRM_DEBUG_KMS("Incorrect SDVO get brigh\n"); 2751 DRM_DEBUG_KMS("Incorrect SDVO get brigh\n");
@@ -2764,81 +2770,81 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
2764 return; 2770 return;
2765} 2771}
2766 2772
2767bool intel_sdvo_init(struct drm_device *dev, int output_device) 2773bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
2768{ 2774{
2769 struct drm_i915_private *dev_priv = dev->dev_private; 2775 struct drm_i915_private *dev_priv = dev->dev_private;
2770 struct drm_connector *connector; 2776 struct drm_connector *connector;
2771 struct intel_output *intel_output; 2777 struct intel_encoder *intel_encoder;
2772 struct intel_sdvo_priv *sdvo_priv; 2778 struct intel_sdvo_priv *sdvo_priv;
2773 2779
2774 u8 ch[0x40]; 2780 u8 ch[0x40];
2775 int i; 2781 int i;
2776 2782
2777 intel_output = kcalloc(sizeof(struct intel_output)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL); 2783 intel_encoder = kcalloc(sizeof(struct intel_encoder)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL);
2778 if (!intel_output) { 2784 if (!intel_encoder) {
2779 return false; 2785 return false;
2780 } 2786 }
2781 2787
2782 sdvo_priv = (struct intel_sdvo_priv *)(intel_output + 1); 2788 sdvo_priv = (struct intel_sdvo_priv *)(intel_encoder + 1);
2783 sdvo_priv->output_device = output_device; 2789 sdvo_priv->sdvo_reg = sdvo_reg;
2784 2790
2785 intel_output->dev_priv = sdvo_priv; 2791 intel_encoder->dev_priv = sdvo_priv;
2786 intel_output->type = INTEL_OUTPUT_SDVO; 2792 intel_encoder->type = INTEL_OUTPUT_SDVO;
2787 2793
2788 /* setup the DDC bus. */ 2794 /* setup the DDC bus. */
2789 if (output_device == SDVOB) 2795 if (sdvo_reg == SDVOB)
2790 intel_output->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB"); 2796 intel_encoder->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB");
2791 else 2797 else
2792 intel_output->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC"); 2798 intel_encoder->i2c_bus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC");
2793 2799
2794 if (!intel_output->i2c_bus) 2800 if (!intel_encoder->i2c_bus)
2795 goto err_inteloutput; 2801 goto err_inteloutput;
2796 2802
2797 sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, output_device); 2803 sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, sdvo_reg);
2798 2804
2799 /* Save the bit-banging i2c functionality for use by the DDC wrapper */ 2805 /* Save the bit-banging i2c functionality for use by the DDC wrapper */
2800 intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality; 2806 intel_sdvo_i2c_bit_algo.functionality = intel_encoder->i2c_bus->algo->functionality;
2801 2807
2802 /* Read the regs to test if we can talk to the device */ 2808 /* Read the regs to test if we can talk to the device */
2803 for (i = 0; i < 0x40; i++) { 2809 for (i = 0; i < 0x40; i++) {
2804 if (!intel_sdvo_read_byte(intel_output, i, &ch[i])) { 2810 if (!intel_sdvo_read_byte(intel_encoder, i, &ch[i])) {
2805 DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n", 2811 DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n",
2806 output_device == SDVOB ? 'B' : 'C'); 2812 sdvo_reg == SDVOB ? 'B' : 'C');
2807 goto err_i2c; 2813 goto err_i2c;
2808 } 2814 }
2809 } 2815 }
2810 2816
2811 /* setup the DDC bus. */ 2817 /* setup the DDC bus. */
2812 if (output_device == SDVOB) { 2818 if (sdvo_reg == SDVOB) {
2813 intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS"); 2819 intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS");
2814 sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA, 2820 sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA,
2815 "SDVOB/VGA DDC BUS"); 2821 "SDVOB/VGA DDC BUS");
2816 dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS; 2822 dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
2817 } else { 2823 } else {
2818 intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS"); 2824 intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS");
2819 sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA, 2825 sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA,
2820 "SDVOC/VGA DDC BUS"); 2826 "SDVOC/VGA DDC BUS");
2821 dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS; 2827 dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
2822 } 2828 }
2823 2829
2824 if (intel_output->ddc_bus == NULL) 2830 if (intel_encoder->ddc_bus == NULL)
2825 goto err_i2c; 2831 goto err_i2c;
2826 2832
2827 /* Wrap with our custom algo which switches to DDC mode */ 2833 /* Wrap with our custom algo which switches to DDC mode */
2828 intel_output->ddc_bus->algo = &intel_sdvo_i2c_bit_algo; 2834 intel_encoder->ddc_bus->algo = &intel_sdvo_i2c_bit_algo;
2829 2835
2830 /* In default case sdvo lvds is false */ 2836 /* In default case sdvo lvds is false */
2831 intel_sdvo_get_capabilities(intel_output, &sdvo_priv->caps); 2837 intel_sdvo_get_capabilities(intel_encoder, &sdvo_priv->caps);
2832 2838
2833 if (intel_sdvo_output_setup(intel_output, 2839 if (intel_sdvo_output_setup(intel_encoder,
2834 sdvo_priv->caps.output_flags) != true) { 2840 sdvo_priv->caps.output_flags) != true) {
2835 DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n", 2841 DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
2836 output_device == SDVOB ? 'B' : 'C'); 2842 sdvo_reg == SDVOB ? 'B' : 'C');
2837 goto err_i2c; 2843 goto err_i2c;
2838 } 2844 }
2839 2845
2840 2846
2841 connector = &intel_output->base; 2847 connector = &intel_encoder->base;
2842 drm_connector_init(dev, connector, &intel_sdvo_connector_funcs, 2848 drm_connector_init(dev, connector, &intel_sdvo_connector_funcs,
2843 connector->connector_type); 2849 connector->connector_type);
2844 2850
@@ -2847,12 +2853,12 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
2847 connector->doublescan_allowed = 0; 2853 connector->doublescan_allowed = 0;
2848 connector->display_info.subpixel_order = SubPixelHorizontalRGB; 2854 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
2849 2855
2850 drm_encoder_init(dev, &intel_output->enc, 2856 drm_encoder_init(dev, &intel_encoder->enc,
2851 &intel_sdvo_enc_funcs, intel_output->enc.encoder_type); 2857 &intel_sdvo_enc_funcs, intel_encoder->enc.encoder_type);
2852 2858
2853 drm_encoder_helper_add(&intel_output->enc, &intel_sdvo_helper_funcs); 2859 drm_encoder_helper_add(&intel_encoder->enc, &intel_sdvo_helper_funcs);
2854 2860
2855 drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); 2861 drm_mode_connector_attach_encoder(&intel_encoder->base, &intel_encoder->enc);
2856 if (sdvo_priv->is_tv) 2862 if (sdvo_priv->is_tv)
2857 intel_sdvo_tv_create_property(connector); 2863 intel_sdvo_tv_create_property(connector);
2858 2864
@@ -2864,9 +2870,9 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
2864 intel_sdvo_select_ddc_bus(sdvo_priv); 2870 intel_sdvo_select_ddc_bus(sdvo_priv);
2865 2871
2866 /* Set the input timing to the screen. Assume always input 0. */ 2872 /* Set the input timing to the screen. Assume always input 0. */
2867 intel_sdvo_set_target_input(intel_output, true, false); 2873 intel_sdvo_set_target_input(intel_encoder, true, false);
2868 2874
2869 intel_sdvo_get_input_pixel_clock_range(intel_output, 2875 intel_sdvo_get_input_pixel_clock_range(intel_encoder,
2870 &sdvo_priv->pixel_clock_min, 2876 &sdvo_priv->pixel_clock_min,
2871 &sdvo_priv->pixel_clock_max); 2877 &sdvo_priv->pixel_clock_max);
2872 2878
@@ -2893,12 +2899,12 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
2893err_i2c: 2899err_i2c:
2894 if (sdvo_priv->analog_ddc_bus != NULL) 2900 if (sdvo_priv->analog_ddc_bus != NULL)
2895 intel_i2c_destroy(sdvo_priv->analog_ddc_bus); 2901 intel_i2c_destroy(sdvo_priv->analog_ddc_bus);
2896 if (intel_output->ddc_bus != NULL) 2902 if (intel_encoder->ddc_bus != NULL)
2897 intel_i2c_destroy(intel_output->ddc_bus); 2903 intel_i2c_destroy(intel_encoder->ddc_bus);
2898 if (intel_output->i2c_bus != NULL) 2904 if (intel_encoder->i2c_bus != NULL)
2899 intel_i2c_destroy(intel_output->i2c_bus); 2905 intel_i2c_destroy(intel_encoder->i2c_bus);
2900err_inteloutput: 2906err_inteloutput:
2901 kfree(intel_output); 2907 kfree(intel_encoder);
2902 2908
2903 return false; 2909 return false;
2904} 2910}
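[Editor's note] intel_sdvo_init() above allocates the encoder and its SDVO private data in a single kcalloc() and points dev_priv at the memory immediately past the base struct. A user-space sketch of that trailing-private-data layout, with malloc()/free() standing in for the kernel allocators and an illustrative register offset:

#include <stdio.h>
#include <stdlib.h>

struct encoder {
	int type;
	void *dev_priv;
};

struct sdvo_priv {
	int sdvo_reg;
	unsigned char slave_addr;
};

int main(void)
{
	/* one allocation covers both structures */
	struct encoder *enc = calloc(1, sizeof(*enc) + sizeof(struct sdvo_priv));
	struct sdvo_priv *priv;

	if (!enc)
		return 1;

	priv = (struct sdvo_priv *)(enc + 1);  /* trailing private data */
	enc->dev_priv = priv;
	priv->sdvo_reg = 0x61140;              /* illustrative value only */

	printf("encoder at %p, priv at %p\n", (void *)enc, (void *)priv);
	free(enc);                             /* one free() releases both */
	return 0;
}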
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 552ec110b741..d7d39b2327df 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -921,8 +921,8 @@ intel_tv_save(struct drm_connector *connector)
921{ 921{
922 struct drm_device *dev = connector->dev; 922 struct drm_device *dev = connector->dev;
923 struct drm_i915_private *dev_priv = dev->dev_private; 923 struct drm_i915_private *dev_priv = dev->dev_private;
924 struct intel_output *intel_output = to_intel_output(connector); 924 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
925 struct intel_tv_priv *tv_priv = intel_output->dev_priv; 925 struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
926 int i; 926 int i;
927 927
928 tv_priv->save_TV_H_CTL_1 = I915_READ(TV_H_CTL_1); 928 tv_priv->save_TV_H_CTL_1 = I915_READ(TV_H_CTL_1);
@@ -971,8 +971,8 @@ intel_tv_restore(struct drm_connector *connector)
971{ 971{
972 struct drm_device *dev = connector->dev; 972 struct drm_device *dev = connector->dev;
973 struct drm_i915_private *dev_priv = dev->dev_private; 973 struct drm_i915_private *dev_priv = dev->dev_private;
974 struct intel_output *intel_output = to_intel_output(connector); 974 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
975 struct intel_tv_priv *tv_priv = intel_output->dev_priv; 975 struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
976 struct drm_crtc *crtc = connector->encoder->crtc; 976 struct drm_crtc *crtc = connector->encoder->crtc;
977 struct intel_crtc *intel_crtc; 977 struct intel_crtc *intel_crtc;
978 int i; 978 int i;
@@ -1068,9 +1068,9 @@ intel_tv_mode_lookup (char *tv_format)
1068} 1068}
1069 1069
1070static const struct tv_mode * 1070static const struct tv_mode *
1071intel_tv_mode_find (struct intel_output *intel_output) 1071intel_tv_mode_find (struct intel_encoder *intel_encoder)
1072{ 1072{
1073 struct intel_tv_priv *tv_priv = intel_output->dev_priv; 1073 struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
1074 1074
1075 return intel_tv_mode_lookup(tv_priv->tv_format); 1075 return intel_tv_mode_lookup(tv_priv->tv_format);
1076} 1076}
@@ -1078,8 +1078,8 @@ intel_tv_mode_find (struct intel_output *intel_output)
1078static enum drm_mode_status 1078static enum drm_mode_status
1079intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) 1079intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode)
1080{ 1080{
1081 struct intel_output *intel_output = to_intel_output(connector); 1081 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1082 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); 1082 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
1083 1083
1084 /* Ensure TV refresh is close to desired refresh */ 1084 /* Ensure TV refresh is close to desired refresh */
1085 if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000) 1085 if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000)
@@ -1095,8 +1095,8 @@ intel_tv_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
1095{ 1095{
1096 struct drm_device *dev = encoder->dev; 1096 struct drm_device *dev = encoder->dev;
1097 struct drm_mode_config *drm_config = &dev->mode_config; 1097 struct drm_mode_config *drm_config = &dev->mode_config;
1098 struct intel_output *intel_output = enc_to_intel_output(encoder); 1098 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
1099 const struct tv_mode *tv_mode = intel_tv_mode_find (intel_output); 1099 const struct tv_mode *tv_mode = intel_tv_mode_find (intel_encoder);
1100 struct drm_encoder *other_encoder; 1100 struct drm_encoder *other_encoder;
1101 1101
1102 if (!tv_mode) 1102 if (!tv_mode)
@@ -1121,9 +1121,9 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
1121 struct drm_i915_private *dev_priv = dev->dev_private; 1121 struct drm_i915_private *dev_priv = dev->dev_private;
1122 struct drm_crtc *crtc = encoder->crtc; 1122 struct drm_crtc *crtc = encoder->crtc;
1123 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1123 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1124 struct intel_output *intel_output = enc_to_intel_output(encoder); 1124 struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
1125 struct intel_tv_priv *tv_priv = intel_output->dev_priv; 1125 struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
1126 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); 1126 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
1127 u32 tv_ctl; 1127 u32 tv_ctl;
1128 u32 hctl1, hctl2, hctl3; 1128 u32 hctl1, hctl2, hctl3;
1129 u32 vctl1, vctl2, vctl3, vctl4, vctl5, vctl6, vctl7; 1129 u32 vctl1, vctl2, vctl3, vctl4, vctl5, vctl6, vctl7;
@@ -1360,9 +1360,9 @@ static const struct drm_display_mode reported_modes[] = {
1360 * \return false if TV is disconnected. 1360 * \return false if TV is disconnected.
1361 */ 1361 */
1362static int 1362static int
1363intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output) 1363intel_tv_detect_type (struct drm_crtc *crtc, struct intel_encoder *intel_encoder)
1364{ 1364{
1365 struct drm_encoder *encoder = &intel_output->enc; 1365 struct drm_encoder *encoder = &intel_encoder->enc;
1366 struct drm_device *dev = encoder->dev; 1366 struct drm_device *dev = encoder->dev;
1367 struct drm_i915_private *dev_priv = dev->dev_private; 1367 struct drm_i915_private *dev_priv = dev->dev_private;
1368 unsigned long irqflags; 1368 unsigned long irqflags;
@@ -1441,9 +1441,9 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output)
1441 */ 1441 */
1442static void intel_tv_find_better_format(struct drm_connector *connector) 1442static void intel_tv_find_better_format(struct drm_connector *connector)
1443{ 1443{
1444 struct intel_output *intel_output = to_intel_output(connector); 1444 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1445 struct intel_tv_priv *tv_priv = intel_output->dev_priv; 1445 struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
1446 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); 1446 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
1447 int i; 1447 int i;
1448 1448
1449 if ((tv_priv->type == DRM_MODE_CONNECTOR_Component) == 1449 if ((tv_priv->type == DRM_MODE_CONNECTOR_Component) ==
@@ -1475,9 +1475,9 @@ intel_tv_detect(struct drm_connector *connector)
1475{ 1475{
1476 struct drm_crtc *crtc; 1476 struct drm_crtc *crtc;
1477 struct drm_display_mode mode; 1477 struct drm_display_mode mode;
1478 struct intel_output *intel_output = to_intel_output(connector); 1478 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1479 struct intel_tv_priv *tv_priv = intel_output->dev_priv; 1479 struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
1480 struct drm_encoder *encoder = &intel_output->enc; 1480 struct drm_encoder *encoder = &intel_encoder->enc;
1481 int dpms_mode; 1481 int dpms_mode;
1482 int type = tv_priv->type; 1482 int type = tv_priv->type;
1483 1483
@@ -1485,12 +1485,12 @@ intel_tv_detect(struct drm_connector *connector)
1485 drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V); 1485 drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V);
1486 1486
1487 if (encoder->crtc && encoder->crtc->enabled) { 1487 if (encoder->crtc && encoder->crtc->enabled) {
1488 type = intel_tv_detect_type(encoder->crtc, intel_output); 1488 type = intel_tv_detect_type(encoder->crtc, intel_encoder);
1489 } else { 1489 } else {
1490 crtc = intel_get_load_detect_pipe(intel_output, &mode, &dpms_mode); 1490 crtc = intel_get_load_detect_pipe(intel_encoder, &mode, &dpms_mode);
1491 if (crtc) { 1491 if (crtc) {
1492 type = intel_tv_detect_type(crtc, intel_output); 1492 type = intel_tv_detect_type(crtc, intel_encoder);
1493 intel_release_load_detect_pipe(intel_output, dpms_mode); 1493 intel_release_load_detect_pipe(intel_encoder, dpms_mode);
1494 } else 1494 } else
1495 type = -1; 1495 type = -1;
1496 } 1496 }
@@ -1525,8 +1525,8 @@ static void
1525intel_tv_chose_preferred_modes(struct drm_connector *connector, 1525intel_tv_chose_preferred_modes(struct drm_connector *connector,
1526 struct drm_display_mode *mode_ptr) 1526 struct drm_display_mode *mode_ptr)
1527{ 1527{
1528 struct intel_output *intel_output = to_intel_output(connector); 1528 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1529 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); 1529 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
1530 1530
1531 if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480) 1531 if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480)
1532 mode_ptr->type |= DRM_MODE_TYPE_PREFERRED; 1532 mode_ptr->type |= DRM_MODE_TYPE_PREFERRED;
@@ -1550,8 +1550,8 @@ static int
1550intel_tv_get_modes(struct drm_connector *connector) 1550intel_tv_get_modes(struct drm_connector *connector)
1551{ 1551{
1552 struct drm_display_mode *mode_ptr; 1552 struct drm_display_mode *mode_ptr;
1553 struct intel_output *intel_output = to_intel_output(connector); 1553 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1554 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output); 1554 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_encoder);
1555 int j, count = 0; 1555 int j, count = 0;
1556 u64 tmp; 1556 u64 tmp;
1557 1557
@@ -1604,11 +1604,11 @@ intel_tv_get_modes(struct drm_connector *connector)
1604static void 1604static void
1605intel_tv_destroy (struct drm_connector *connector) 1605intel_tv_destroy (struct drm_connector *connector)
1606{ 1606{
1607 struct intel_output *intel_output = to_intel_output(connector); 1607 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1608 1608
1609 drm_sysfs_connector_remove(connector); 1609 drm_sysfs_connector_remove(connector);
1610 drm_connector_cleanup(connector); 1610 drm_connector_cleanup(connector);
1611 kfree(intel_output); 1611 kfree(intel_encoder);
1612} 1612}
1613 1613
1614 1614
@@ -1617,9 +1617,9 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
1617 uint64_t val) 1617 uint64_t val)
1618{ 1618{
1619 struct drm_device *dev = connector->dev; 1619 struct drm_device *dev = connector->dev;
1620 struct intel_output *intel_output = to_intel_output(connector); 1620 struct intel_encoder *intel_encoder = to_intel_encoder(connector);
1621 struct intel_tv_priv *tv_priv = intel_output->dev_priv; 1621 struct intel_tv_priv *tv_priv = intel_encoder->dev_priv;
1622 struct drm_encoder *encoder = &intel_output->enc; 1622 struct drm_encoder *encoder = &intel_encoder->enc;
1623 struct drm_crtc *crtc = encoder->crtc; 1623 struct drm_crtc *crtc = encoder->crtc;
1624 int ret = 0; 1624 int ret = 0;
1625 bool changed = false; 1625 bool changed = false;
@@ -1740,7 +1740,7 @@ intel_tv_init(struct drm_device *dev)
1740{ 1740{
1741 struct drm_i915_private *dev_priv = dev->dev_private; 1741 struct drm_i915_private *dev_priv = dev->dev_private;
1742 struct drm_connector *connector; 1742 struct drm_connector *connector;
1743 struct intel_output *intel_output; 1743 struct intel_encoder *intel_encoder;
1744 struct intel_tv_priv *tv_priv; 1744 struct intel_tv_priv *tv_priv;
1745 u32 tv_dac_on, tv_dac_off, save_tv_dac; 1745 u32 tv_dac_on, tv_dac_off, save_tv_dac;
1746 char **tv_format_names; 1746 char **tv_format_names;
@@ -1780,28 +1780,28 @@ intel_tv_init(struct drm_device *dev)
1780 (tv_dac_off & TVDAC_STATE_CHG_EN) != 0) 1780 (tv_dac_off & TVDAC_STATE_CHG_EN) != 0)
1781 return; 1781 return;
1782 1782
1783 intel_output = kzalloc(sizeof(struct intel_output) + 1783 intel_encoder = kzalloc(sizeof(struct intel_encoder) +
1784 sizeof(struct intel_tv_priv), GFP_KERNEL); 1784 sizeof(struct intel_tv_priv), GFP_KERNEL);
1785 if (!intel_output) { 1785 if (!intel_encoder) {
1786 return; 1786 return;
1787 } 1787 }
1788 1788
1789 connector = &intel_output->base; 1789 connector = &intel_encoder->base;
1790 1790
1791 drm_connector_init(dev, connector, &intel_tv_connector_funcs, 1791 drm_connector_init(dev, connector, &intel_tv_connector_funcs,
1792 DRM_MODE_CONNECTOR_SVIDEO); 1792 DRM_MODE_CONNECTOR_SVIDEO);
1793 1793
1794 drm_encoder_init(dev, &intel_output->enc, &intel_tv_enc_funcs, 1794 drm_encoder_init(dev, &intel_encoder->enc, &intel_tv_enc_funcs,
1795 DRM_MODE_ENCODER_TVDAC); 1795 DRM_MODE_ENCODER_TVDAC);
1796 1796
1797 drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); 1797 drm_mode_connector_attach_encoder(&intel_encoder->base, &intel_encoder->enc);
1798 tv_priv = (struct intel_tv_priv *)(intel_output + 1); 1798 tv_priv = (struct intel_tv_priv *)(intel_encoder + 1);
1799 intel_output->type = INTEL_OUTPUT_TVOUT; 1799 intel_encoder->type = INTEL_OUTPUT_TVOUT;
1800 intel_output->crtc_mask = (1 << 0) | (1 << 1); 1800 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
1801 intel_output->clone_mask = (1 << INTEL_TV_CLONE_BIT); 1801 intel_encoder->clone_mask = (1 << INTEL_TV_CLONE_BIT);
1802 intel_output->enc.possible_crtcs = ((1 << 0) | (1 << 1)); 1802 intel_encoder->enc.possible_crtcs = ((1 << 0) | (1 << 1));
1803 intel_output->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT); 1803 intel_encoder->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
1804 intel_output->dev_priv = tv_priv; 1804 intel_encoder->dev_priv = tv_priv;
1805 tv_priv->type = DRM_MODE_CONNECTOR_Unknown; 1805 tv_priv->type = DRM_MODE_CONNECTOR_Unknown;
1806 1806
1807 /* BIOS margin values */ 1807 /* BIOS margin values */
@@ -1812,7 +1812,7 @@ intel_tv_init(struct drm_device *dev)
1812 1812
1813 tv_priv->tv_format = kstrdup(tv_modes[initial_mode].name, GFP_KERNEL); 1813 tv_priv->tv_format = kstrdup(tv_modes[initial_mode].name, GFP_KERNEL);
1814 1814
1815 drm_encoder_helper_add(&intel_output->enc, &intel_tv_helper_funcs); 1815 drm_encoder_helper_add(&intel_encoder->enc, &intel_tv_helper_funcs);
1816 drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs); 1816 drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs);
1817 connector->interlace_allowed = false; 1817 connector->interlace_allowed = false;
1818 connector->doublescan_allowed = false; 1818 connector->doublescan_allowed = false;
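[Editor's note] Throughout intel_tv.c (and intel_sdvo.c above) the renamed to_intel_encoder() recovers the wrapper structure from the embedded drm_connector, presumably via container_of() or an equivalent cast since "base" is the embedded member. A self-contained sketch of that recovery, with the types cut down to the bare minimum:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct connector { int id; };

struct encoder {
	struct connector base;   /* embedded member, as in intel_encoder */
	void *dev_priv;
};

static struct encoder *to_encoder(struct connector *c)
{
	return container_of(c, struct encoder, base);
}

int main(void)
{
	struct encoder enc = { .base = { .id = 42 }, .dev_priv = NULL };
	struct connector *c = &enc.base;

	printf("recovered wrapper: %p (expected %p)\n",
	       (void *)to_encoder(c), (void *)&enc);
	return 0;
}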
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index 32db806f3b5a..453df3f6053f 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -12,7 +12,7 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
12 nouveau_dp.o nouveau_grctx.o \ 12 nouveau_dp.o nouveau_grctx.o \
13 nv04_timer.o \ 13 nv04_timer.o \
14 nv04_mc.o nv40_mc.o nv50_mc.o \ 14 nv04_mc.o nv40_mc.o nv50_mc.o \
15 nv04_fb.o nv10_fb.o nv40_fb.o \ 15 nv04_fb.o nv10_fb.o nv40_fb.o nv50_fb.o \
16 nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \ 16 nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \
17 nv04_graph.o nv10_graph.o nv20_graph.o \ 17 nv04_graph.o nv10_graph.o nv20_graph.o \
18 nv40_graph.o nv50_graph.o \ 18 nv40_graph.o nv50_graph.o \
@@ -22,7 +22,7 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
22 nv50_cursor.o nv50_display.o nv50_fbcon.o \ 22 nv50_cursor.o nv50_display.o nv50_fbcon.o \
23 nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \ 23 nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \
24 nv04_crtc.o nv04_display.o nv04_cursor.o nv04_fbcon.o \ 24 nv04_crtc.o nv04_display.o nv04_cursor.o nv04_fbcon.o \
25 nv17_gpio.o 25 nv17_gpio.o nv50_gpio.o
26 26
27nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o 27nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o
28nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o 28nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 0e0730a53137..e13f6af0037a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -1,5 +1,6 @@
1#include <linux/pci.h> 1#include <linux/pci.h>
2#include <linux/acpi.h> 2#include <linux/acpi.h>
3#include <linux/slab.h>
3#include <acpi/acpi_drivers.h> 4#include <acpi/acpi_drivers.h>
4#include <acpi/acpi_bus.h> 5#include <acpi/acpi_bus.h>
5 6
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 75bceee76044..abc382a9918b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -2573,48 +2573,34 @@ init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2573 * each GPIO according to various values listed in each entry 2573 * each GPIO according to various values listed in each entry
2574 */ 2574 */
2575 2575
2576 const uint32_t nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 }; 2576 struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
2577 const uint32_t nv50_gpio_ctl[2] = { 0xe100, 0xe28c }; 2577 const uint32_t nv50_gpio_ctl[2] = { 0xe100, 0xe28c };
2578 const uint8_t *gpio_table = &bios->data[bios->dcb.gpio_table_ptr];
2579 const uint8_t *gpio_entry;
2580 int i; 2578 int i;
2581 2579
2582 if (!iexec->execute) 2580 if (dev_priv->card_type != NV_50) {
2583 return 1; 2581 NV_ERROR(bios->dev, "INIT_GPIO on unsupported chipset\n");
2584 2582 return -ENODEV;
2585 if (bios->dcb.version != 0x40) {
2586 NV_ERROR(bios->dev, "DCB table not version 4.0\n");
2587 return 0;
2588 } 2583 }
2589 2584
2590 if (!bios->dcb.gpio_table_ptr) { 2585 if (!iexec->execute)
2591 NV_WARN(bios->dev, "Invalid pointer to INIT_8E table\n"); 2586 return 1;
2592 return 0;
2593 }
2594 2587
2595 gpio_entry = gpio_table + gpio_table[1]; 2588 for (i = 0; i < bios->dcb.gpio.entries; i++) {
2596 for (i = 0; i < gpio_table[2]; i++, gpio_entry += gpio_table[3]) { 2589 struct dcb_gpio_entry *gpio = &bios->dcb.gpio.entry[i];
2597 uint32_t entry = ROM32(gpio_entry[0]), r, s, v; 2590 uint32_t r, s, v;
2598 int line = (entry & 0x0000001f);
2599 2591
2600 BIOSLOG(bios, "0x%04X: Entry: 0x%08X\n", offset, entry); 2592 BIOSLOG(bios, "0x%04X: Entry: 0x%08X\n", offset, gpio->entry);
2601 2593
2602 if ((entry & 0x0000ff00) == 0x0000ff00) 2594 nv50_gpio_set(bios->dev, gpio->tag, gpio->state_default);
2603 continue;
2604 2595
2605 r = nv50_gpio_reg[line >> 3]; 2596 /* The NVIDIA binary driver doesn't appear to actually do
2606 s = (line & 0x07) << 2; 2597 * any of this, my VBIOS does however.
2607 v = bios_rd32(bios, r) & ~(0x00000003 << s); 2598 */
2608 if (entry & 0x01000000) 2599 /* Not a clue, needs de-magicing */
2609 v |= (((entry & 0x60000000) >> 29) ^ 2) << s; 2600 r = nv50_gpio_ctl[gpio->line >> 4];
2610 else 2601 s = (gpio->line & 0x0f);
2611 v |= (((entry & 0x18000000) >> 27) ^ 2) << s;
2612 bios_wr32(bios, r, v);
2613
2614 r = nv50_gpio_ctl[line >> 4];
2615 s = (line & 0x0f);
2616 v = bios_rd32(bios, r) & ~(0x00010001 << s); 2602 v = bios_rd32(bios, r) & ~(0x00010001 << s);
2617 switch ((entry & 0x06000000) >> 25) { 2603 switch ((gpio->entry & 0x06000000) >> 25) {
2618 case 1: 2604 case 1:
2619 v |= (0x00000001 << s); 2605 v |= (0x00000001 << s);
2620 break; 2606 break;
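[Editor's note] The reworked init_gpio() above does a read-modify-write on the NV50 GPIO control word for each line: the pair of bits spaced 16 apart (mask 0x00010001 << line) is cleared, then re-set from the entry's mode field. Only the "case 1" branch is visible in this hunk; the second branch below is an assumption included for symmetry. Minimal sketch with a plain variable standing in for the register:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t ctl = 0xffffffff;   /* pretend register contents */
	unsigned int line = 5;       /* GPIO line within this control word */
	unsigned int mode = 1;       /* 2-bit mode from the table entry */

	ctl &= ~(0x00010001u << line);            /* clear both bits for the line */
	if (mode == 1)
		ctl |= 0x00000001u << line;       /* set only the low bit (as in the hunk) */
	else if (mode == 2)
		ctl |= 0x00010000u << line;       /* assumed high-bit case */

	printf("control word now 0x%08x\n", (unsigned)ctl);
	return 0;
}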
@@ -3198,7 +3184,6 @@ static int run_lvds_table(struct drm_device *dev, struct dcb_entry *dcbent, int
3198 struct nvbios *bios = &dev_priv->vbios; 3184 struct nvbios *bios = &dev_priv->vbios;
3199 unsigned int outputset = (dcbent->or == 4) ? 1 : 0; 3185 unsigned int outputset = (dcbent->or == 4) ? 1 : 0;
3200 uint16_t scriptptr = 0, clktable; 3186 uint16_t scriptptr = 0, clktable;
3201 uint8_t clktableptr = 0;
3202 3187
3203 /* 3188 /*
3204 * For now we assume version 3.0 table - g80 support will need some 3189 * For now we assume version 3.0 table - g80 support will need some
@@ -3217,26 +3202,29 @@ static int run_lvds_table(struct drm_device *dev, struct dcb_entry *dcbent, int
3217 scriptptr = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 11 + outputset * 2]); 3202 scriptptr = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 11 + outputset * 2]);
3218 break; 3203 break;
3219 case LVDS_RESET: 3204 case LVDS_RESET:
3205 clktable = bios->fp.lvdsmanufacturerpointer + 15;
3206 if (dcbent->or == 4)
3207 clktable += 8;
3208
3220 if (dcbent->lvdsconf.use_straps_for_mode) { 3209 if (dcbent->lvdsconf.use_straps_for_mode) {
3221 if (bios->fp.dual_link) 3210 if (bios->fp.dual_link)
3222 clktableptr += 2; 3211 clktable += 4;
3223 if (bios->fp.BITbit1) 3212 if (bios->fp.if_is_24bit)
3224 clktableptr++; 3213 clktable += 2;
3225 } else { 3214 } else {
3226 /* using EDID */ 3215 /* using EDID */
3227 uint8_t fallback = bios->data[bios->fp.lvdsmanufacturerpointer + 4]; 3216 int cmpval_24bit = (dcbent->or == 4) ? 4 : 1;
3228 int fallbackcmpval = (dcbent->or == 4) ? 4 : 1;
3229 3217
3230 if (bios->fp.dual_link) { 3218 if (bios->fp.dual_link) {
3231 clktableptr += 2; 3219 clktable += 4;
3232 fallbackcmpval *= 2; 3220 cmpval_24bit <<= 1;
3233 } 3221 }
3234 if (fallbackcmpval & fallback) 3222
3235 clktableptr++; 3223 if (bios->fp.strapless_is_24bit & cmpval_24bit)
3224 clktable += 2;
3236 } 3225 }
3237 3226
3238 /* adding outputset * 8 may not be correct */ 3227 clktable = ROM16(bios->data[clktable]);
3239 clktable = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 15 + clktableptr * 2 + outputset * 8]);
3240 if (!clktable) { 3228 if (!clktable) {
3241 NV_ERROR(dev, "Pixel clock comparison table not found\n"); 3229 NV_ERROR(dev, "Pixel clock comparison table not found\n");
3242 return -ENOENT; 3230 return -ENOENT;
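[Editor's note] The LVDS_RESET change above replaces the old clktableptr arithmetic with direct offsets into the pixel-clock comparison table: base + 15, plus 8 for the second output set (or == 4), plus 4 for dual-link and plus 2 for a 24-bit panel (straps path shown). A sketch of that offset walk with made-up values:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

int main(void)
{
	uint16_t lvds_ptr = 0x100;    /* hypothetical lvdsmanufacturerpointer */
	bool second_output = true;    /* dcbent->or == 4 */
	bool dual_link = true;
	bool panel_is_24bit = false;

	uint16_t clktable = lvds_ptr + 15;
	if (second_output)
		clktable += 8;
	if (dual_link)
		clktable += 4;
	if (panel_is_24bit)
		clktable += 2;

	printf("comparison table pointer read from offset 0x%04x\n", (unsigned)clktable);
	return 0;
}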
@@ -3638,37 +3626,40 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b
3638 *if_is_24bit = bios->data[lvdsofs] & 16; 3626 *if_is_24bit = bios->data[lvdsofs] & 16;
3639 break; 3627 break;
3640 case 0x30: 3628 case 0x30:
3641 /* 3629 case 0x40:
3642 * My money would be on there being a 24 bit interface bit in
3643 * this table, but I have no example of a laptop bios with a
3644 * 24 bit panel to confirm that. Hence we shout loudly if any
3645 * bit other than bit 0 is set (I've not even seen bit 1)
3646 */
3647 if (bios->data[lvdsofs] > 1)
3648 NV_ERROR(dev,
3649 "You have a very unusual laptop display; please report it\n");
3650 /* 3630 /*
3651 * No sign of the "power off for reset" or "reset for panel 3631 * No sign of the "power off for reset" or "reset for panel
3652 * on" bits, but it's safer to assume we should 3632 * on" bits, but it's safer to assume we should
3653 */ 3633 */
3654 bios->fp.power_off_for_reset = true; 3634 bios->fp.power_off_for_reset = true;
3655 bios->fp.reset_after_pclk_change = true; 3635 bios->fp.reset_after_pclk_change = true;
3636
3656 /* 3637 /*
3657 * It's ok lvdsofs is wrong for nv4x edid case; dual_link is 3638 * It's ok lvdsofs is wrong for nv4x edid case; dual_link is
3658 * over-written, and BITbit1 isn't used 3639 * over-written, and if_is_24bit isn't used
3659 */ 3640 */
3660 bios->fp.dual_link = bios->data[lvdsofs] & 1; 3641 bios->fp.dual_link = bios->data[lvdsofs] & 1;
3661 bios->fp.BITbit1 = bios->data[lvdsofs] & 2;
3662 bios->fp.duallink_transition_clk = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 5]) * 10;
3663 break;
3664 case 0x40:
3665 bios->fp.dual_link = bios->data[lvdsofs] & 1;
3666 bios->fp.if_is_24bit = bios->data[lvdsofs] & 2; 3642 bios->fp.if_is_24bit = bios->data[lvdsofs] & 2;
3667 bios->fp.strapless_is_24bit = bios->data[bios->fp.lvdsmanufacturerpointer + 4]; 3643 bios->fp.strapless_is_24bit = bios->data[bios->fp.lvdsmanufacturerpointer + 4];
3668 bios->fp.duallink_transition_clk = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 5]) * 10; 3644 bios->fp.duallink_transition_clk = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 5]) * 10;
3669 break; 3645 break;
3670 } 3646 }
3671 3647
3648 /* Dell Latitude D620 reports a too-high value for the dual-link
3649 * transition freq, causing us to program the panel incorrectly.
3650 *
3651 * It doesn't appear the VBIOS actually uses its transition freq
3652 * (90000kHz), instead it uses the "Number of LVDS channels" field
3653 * out of the panel ID structure (http://www.spwg.org/).
3654 *
3655 * For the moment, a quirk will do :)
3656 */
3657 if ((dev->pdev->device == 0x01d7) &&
3658 (dev->pdev->subsystem_vendor == 0x1028) &&
3659 (dev->pdev->subsystem_device == 0x01c2)) {
3660 bios->fp.duallink_transition_clk = 80000;
3661 }
3662
3672 /* set dual_link flag for EDID case */ 3663 /* set dual_link flag for EDID case */
3673 if (pxclk && (chip_version < 0x25 || chip_version > 0x28)) 3664 if (pxclk && (chip_version < 0x25 || chip_version > 0x28))
3674 bios->fp.dual_link = (pxclk >= bios->fp.duallink_transition_clk); 3665 bios->fp.dual_link = (pxclk >= bios->fp.duallink_transition_clk);
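[Editor's note] The hunk above adds a PCI-ID keyed quirk for the Dell Latitude D620, overriding the VBIOS's too-high dual-link transition frequency (90000 kHz) with 80000 kHz. Sketch of the match-and-override, with a tiny stand-in for struct pci_dev:

#include <stdio.h>
#include <stdint.h>

struct pdev {
	uint16_t device;
	uint16_t subsystem_vendor;
	uint16_t subsystem_device;
};

int main(void)
{
	struct pdev pdev = { 0x01d7, 0x1028, 0x01c2 };  /* IDs from the quirk */
	int duallink_transition_clk = 90000;            /* kHz, as reported by the VBIOS */

	if (pdev.device == 0x01d7 &&
	    pdev.subsystem_vendor == 0x1028 &&
	    pdev.subsystem_device == 0x01c2)
		duallink_transition_clk = 80000;        /* quirked value */

	printf("dual-link transition clock: %d kHz\n", duallink_transition_clk);
	return 0;
}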
@@ -5077,25 +5068,25 @@ parse_dcb30_gpio_entry(struct nvbios *bios, uint16_t offset)
5077 gpio->tag = tag; 5068 gpio->tag = tag;
5078 gpio->line = line; 5069 gpio->line = line;
5079 gpio->invert = flags != 4; 5070 gpio->invert = flags != 4;
5071 gpio->entry = ent;
5080} 5072}
5081 5073
5082static void 5074static void
5083parse_dcb40_gpio_entry(struct nvbios *bios, uint16_t offset) 5075parse_dcb40_gpio_entry(struct nvbios *bios, uint16_t offset)
5084{ 5076{
5077 uint32_t entry = ROM32(bios->data[offset]);
5085 struct dcb_gpio_entry *gpio; 5078 struct dcb_gpio_entry *gpio;
5086 uint32_t ent = ROM32(bios->data[offset]);
5087 uint8_t line = ent & 0x1f,
5088 tag = ent >> 8 & 0xff;
5089 5079
5090 if (tag == 0xff) 5080 if ((entry & 0x0000ff00) == 0x0000ff00)
5091 return; 5081 return;
5092 5082
5093 gpio = new_gpio_entry(bios); 5083 gpio = new_gpio_entry(bios);
5094 5084 gpio->tag = (entry & 0x0000ff00) >> 8;
5095 /* Currently unused, we may need more fields parsed at some 5085 gpio->line = (entry & 0x0000001f) >> 0;
5096 * point. */ 5086 gpio->state_default = (entry & 0x01000000) >> 24;
5097 gpio->tag = tag; 5087 gpio->state[0] = (entry & 0x18000000) >> 27;
5098 gpio->line = line; 5088 gpio->state[1] = (entry & 0x60000000) >> 29;
5089 gpio->entry = entry;
5099} 5090}
5100 5091
5101static void 5092static void
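[Editor's note] The new parse_dcb40_gpio_entry() above decodes the whole 32-bit DCB 4.0 GPIO entry: tag in bits 8-15 (0xff means unused), line in bits 0-4, default state in bit 24, and the two per-state values in bits 27-28 and 29-30. Sketch of the decode with a made-up raw entry:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t entry = 0x31000107;  /* made-up raw table entry */

	if ((entry & 0x0000ff00) == 0x0000ff00) {
		printf("unused entry\n");
		return 0;
	}

	printf("tag 0x%02x line %u default %u state0 %u state1 %u\n",
	       (unsigned)((entry & 0x0000ff00) >> 8),
	       (unsigned)((entry & 0x0000001f) >> 0),
	       (unsigned)((entry & 0x01000000) >> 24),
	       (unsigned)((entry & 0x18000000) >> 27),
	       (unsigned)((entry & 0x60000000) >> 29));
	return 0;
}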
@@ -5211,6 +5202,21 @@ divine_connector_type(struct nvbios *bios, int index)
5211} 5202}
5212 5203
5213static void 5204static void
5205apply_dcb_connector_quirks(struct nvbios *bios, int idx)
5206{
5207 struct dcb_connector_table_entry *cte = &bios->dcb.connector.entry[idx];
5208 struct drm_device *dev = bios->dev;
5209
5210 /* Gigabyte NX85T */
5211 if ((dev->pdev->device == 0x0421) &&
5212 (dev->pdev->subsystem_vendor == 0x1458) &&
5213 (dev->pdev->subsystem_device == 0x344c)) {
5214 if (cte->type == DCB_CONNECTOR_HDMI_1)
5215 cte->type = DCB_CONNECTOR_DVI_I;
5216 }
5217}
5218
5219static void
5214parse_dcb_connector_table(struct nvbios *bios) 5220parse_dcb_connector_table(struct nvbios *bios)
5215{ 5221{
5216 struct drm_device *dev = bios->dev; 5222 struct drm_device *dev = bios->dev;
@@ -5238,13 +5244,14 @@ parse_dcb_connector_table(struct nvbios *bios)
5238 entry = conntab + conntab[1]; 5244 entry = conntab + conntab[1];
5239 cte = &ct->entry[0]; 5245 cte = &ct->entry[0];
5240 for (i = 0; i < conntab[2]; i++, entry += conntab[3], cte++) { 5246 for (i = 0; i < conntab[2]; i++, entry += conntab[3], cte++) {
5247 cte->index = i;
5241 if (conntab[3] == 2) 5248 if (conntab[3] == 2)
5242 cte->entry = ROM16(entry[0]); 5249 cte->entry = ROM16(entry[0]);
5243 else 5250 else
5244 cte->entry = ROM32(entry[0]); 5251 cte->entry = ROM32(entry[0]);
5245 5252
5246 cte->type = (cte->entry & 0x000000ff) >> 0; 5253 cte->type = (cte->entry & 0x000000ff) >> 0;
5247 cte->index = (cte->entry & 0x00000f00) >> 8; 5254 cte->index2 = (cte->entry & 0x00000f00) >> 8;
5248 switch (cte->entry & 0x00033000) { 5255 switch (cte->entry & 0x00033000) {
5249 case 0x00001000: 5256 case 0x00001000:
5250 cte->gpio_tag = 0x07; 5257 cte->gpio_tag = 0x07;
@@ -5266,6 +5273,8 @@ parse_dcb_connector_table(struct nvbios *bios)
5266 if (cte->type == 0xff) 5273 if (cte->type == 0xff)
5267 continue; 5274 continue;
5268 5275
5276 apply_dcb_connector_quirks(bios, i);
5277
5269 NV_INFO(dev, " %d: 0x%08x: type 0x%02x idx %d tag 0x%02x\n", 5278 NV_INFO(dev, " %d: 0x%08x: type 0x%02x idx %d tag 0x%02x\n",
5270 i, cte->entry, cte->type, cte->index, cte->gpio_tag); 5279 i, cte->entry, cte->type, cte->index, cte->gpio_tag);
5271 5280
@@ -5287,10 +5296,16 @@ parse_dcb_connector_table(struct nvbios *bios)
5287 break; 5296 break;
5288 default: 5297 default:
5289 cte->type = divine_connector_type(bios, cte->index); 5298 cte->type = divine_connector_type(bios, cte->index);
5290 NV_WARN(dev, "unknown type, using 0x%02x", cte->type); 5299 NV_WARN(dev, "unknown type, using 0x%02x\n", cte->type);
5291 break; 5300 break;
5292 } 5301 }
5293 5302
5303 if (nouveau_override_conntype) {
5304 int type = divine_connector_type(bios, cte->index);
5305 if (type != cte->type)
5306 NV_WARN(dev, " -> type 0x%02x\n", cte->type);
5307 }
5308
5294 } 5309 }
5295} 5310}
5296 5311
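
For reference, a minimal user-space sketch of the bit decoding that the new parse_dcb40_gpio_entry() applies to a DCB 4.0 GPIO entry. The masks and shifts mirror the hunk above; the struct, helper name and sample value are invented for illustration.

/* Standalone sketch: decode a DCB 4.0 GPIO entry the way the patched
 * parse_dcb40_gpio_entry() does.  The sample entry value is made up. */
#include <stdio.h>
#include <stdint.h>

struct gpio_fields {
	uint8_t tag;
	uint8_t line;
	uint8_t state_default;
	uint8_t state[2];
};

static int decode_dcb40_gpio(uint32_t entry, struct gpio_fields *g)
{
	/* tag 0xff marks an unused slot; the driver skips it */
	if ((entry & 0x0000ff00) == 0x0000ff00)
		return -1;

	g->tag           = (entry & 0x0000ff00) >> 8;
	g->line          = (entry & 0x0000001f) >> 0;
	g->state_default = (entry & 0x01000000) >> 24;
	g->state[0]      = (entry & 0x18000000) >> 27;
	g->state[1]      = (entry & 0x60000000) >> 29;
	return 0;
}

int main(void)
{
	struct gpio_fields g;
	uint32_t sample = 0x61000c07;	/* invented entry, for illustration only */

	if (decode_dcb40_gpio(sample, &g) == 0)
		printf("tag 0x%02x line %d default %d states %d/%d\n",
		       g.tag, g.line, g.state_default, g.state[0], g.state[1]);
	return 0;
}
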
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index 9f688aa9a655..c0d7b0a3ece0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -49,6 +49,9 @@ struct dcb_gpio_entry {
49 enum dcb_gpio_tag tag; 49 enum dcb_gpio_tag tag;
50 int line; 50 int line;
51 bool invert; 51 bool invert;
52 uint32_t entry;
53 uint8_t state_default;
54 uint8_t state[2];
52}; 55};
53 56
54struct dcb_gpio_table { 57struct dcb_gpio_table {
@@ -72,9 +75,10 @@ enum dcb_connector_type {
72}; 75};
73 76
74struct dcb_connector_table_entry { 77struct dcb_connector_table_entry {
78 uint8_t index;
75 uint32_t entry; 79 uint32_t entry;
76 enum dcb_connector_type type; 80 enum dcb_connector_type type;
77 uint8_t index; 81 uint8_t index2;
78 uint8_t gpio_tag; 82 uint8_t gpio_tag;
79}; 83};
80 84
@@ -266,7 +270,6 @@ struct nvbios {
266 bool reset_after_pclk_change; 270 bool reset_after_pclk_change;
267 bool dual_link; 271 bool dual_link;
268 bool link_c_increment; 272 bool link_c_increment;
269 bool BITbit1;
270 bool if_is_24bit; 273 bool if_is_24bit;
271 int duallink_transition_clk; 274 int duallink_transition_clk;
272 uint8_t strapless_is_24bit; 275 uint8_t strapless_is_24bit;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 028719fddf76..957d17629840 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -34,6 +34,7 @@
34#include "nouveau_dma.h" 34#include "nouveau_dma.h"
35 35
36#include <linux/log2.h> 36#include <linux/log2.h>
37#include <linux/slab.h>
37 38
38static void 39static void
39nouveau_bo_del_ttm(struct ttm_buffer_object *bo) 40nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
@@ -71,7 +72,7 @@ nouveau_bo_fixup_align(struct drm_device *dev,
71 * many small buffers. 72 * many small buffers.
72 */ 73 */
73 if (dev_priv->card_type == NV_50) { 74 if (dev_priv->card_type == NV_50) {
74 uint32_t block_size = nouveau_mem_fb_amount(dev) >> 15; 75 uint32_t block_size = dev_priv->vram_size >> 15;
75 int i; 76 int i;
76 77
77 switch (tile_flags) { 78 switch (tile_flags) {
@@ -153,7 +154,7 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
153 154
154 nvbo->placement.fpfn = 0; 155 nvbo->placement.fpfn = 0;
155 nvbo->placement.lpfn = mappable ? dev_priv->fb_mappable_pages : 0; 156 nvbo->placement.lpfn = mappable ? dev_priv->fb_mappable_pages : 0;
156 nouveau_bo_placement_set(nvbo, flags); 157 nouveau_bo_placement_set(nvbo, flags, 0);
157 158
158 nvbo->channel = chan; 159 nvbo->channel = chan;
159 ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size, 160 ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
@@ -172,26 +173,33 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
172 return 0; 173 return 0;
173} 174}
174 175
176static void
177set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
178{
179 *n = 0;
180
181 if (type & TTM_PL_FLAG_VRAM)
182 pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
183 if (type & TTM_PL_FLAG_TT)
184 pl[(*n)++] = TTM_PL_FLAG_TT | flags;
185 if (type & TTM_PL_FLAG_SYSTEM)
186 pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
187}
188
175void 189void
176nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t memtype) 190nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
177{ 191{
178 int n = 0; 192 struct ttm_placement *pl = &nvbo->placement;
179 193 uint32_t flags = TTM_PL_MASK_CACHING |
180 if (memtype & TTM_PL_FLAG_VRAM) 194 (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);
181 nvbo->placements[n++] = TTM_PL_FLAG_VRAM | TTM_PL_MASK_CACHING; 195
182 if (memtype & TTM_PL_FLAG_TT) 196 pl->placement = nvbo->placements;
183 nvbo->placements[n++] = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING; 197 set_placement_list(nvbo->placements, &pl->num_placement,
184 if (memtype & TTM_PL_FLAG_SYSTEM) 198 type, flags);
185 nvbo->placements[n++] = TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING; 199
186 nvbo->placement.placement = nvbo->placements; 200 pl->busy_placement = nvbo->busy_placements;
187 nvbo->placement.busy_placement = nvbo->placements; 201 set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
188 nvbo->placement.num_placement = n; 202 type | busy, flags);
189 nvbo->placement.num_busy_placement = n;
190
191 if (nvbo->pin_refcnt) {
192 while (n--)
193 nvbo->placements[n] |= TTM_PL_FLAG_NO_EVICT;
194 }
195} 203}
196 204
197int 205int
@@ -199,7 +207,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
199{ 207{
200 struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev); 208 struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
201 struct ttm_buffer_object *bo = &nvbo->bo; 209 struct ttm_buffer_object *bo = &nvbo->bo;
202 int ret, i; 210 int ret;
203 211
204 if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) { 212 if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
205 NV_ERROR(nouveau_bdev(bo->bdev)->dev, 213 NV_ERROR(nouveau_bdev(bo->bdev)->dev,
@@ -215,9 +223,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
215 if (ret) 223 if (ret)
216 goto out; 224 goto out;
217 225
218 nouveau_bo_placement_set(nvbo, memtype); 226 nouveau_bo_placement_set(nvbo, memtype, 0);
219 for (i = 0; i < nvbo->placement.num_placement; i++)
220 nvbo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
221 227
222 ret = ttm_bo_validate(bo, &nvbo->placement, false, false); 228 ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
223 if (ret == 0) { 229 if (ret == 0) {
@@ -244,7 +250,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
244{ 250{
245 struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev); 251 struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
246 struct ttm_buffer_object *bo = &nvbo->bo; 252 struct ttm_buffer_object *bo = &nvbo->bo;
247 int ret, i; 253 int ret;
248 254
249 if (--nvbo->pin_refcnt) 255 if (--nvbo->pin_refcnt)
250 return 0; 256 return 0;
@@ -253,8 +259,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
253 if (ret) 259 if (ret)
254 return ret; 260 return ret;
255 261
256 for (i = 0; i < nvbo->placement.num_placement; i++) 262 nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
257 nvbo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
258 263
259 ret = ttm_bo_validate(bo, &nvbo->placement, false, false); 264 ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
260 if (ret == 0) { 265 if (ret == 0) {
@@ -395,8 +400,8 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
395 man->io_addr = NULL; 400 man->io_addr = NULL;
396 man->io_offset = drm_get_resource_start(dev, 1); 401 man->io_offset = drm_get_resource_start(dev, 1);
397 man->io_size = drm_get_resource_len(dev, 1); 402 man->io_size = drm_get_resource_len(dev, 1);
398 if (man->io_size > nouveau_mem_fb_amount(dev)) 403 if (man->io_size > dev_priv->vram_size)
399 man->io_size = nouveau_mem_fb_amount(dev); 404 man->io_size = dev_priv->vram_size;
400 405
401 man->gpu_offset = dev_priv->vm_vram_base; 406 man->gpu_offset = dev_priv->vm_vram_base;
402 break; 407 break;
@@ -439,11 +444,11 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
439 444
440 switch (bo->mem.mem_type) { 445 switch (bo->mem.mem_type) {
441 case TTM_PL_VRAM: 446 case TTM_PL_VRAM:
442 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT | 447 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
443 TTM_PL_FLAG_SYSTEM); 448 TTM_PL_FLAG_SYSTEM);
444 break; 449 break;
445 default: 450 default:
446 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM); 451 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
447 break; 452 break;
448 } 453 }
449 454
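
A standalone sketch of the preferred/busy placement split introduced in nouveau_bo_placement_set() above: the preferred list is built from the requested type only, while the busy list additionally contains the fallback types. The TTM_PL_* values below are illustrative stand-ins, not the kernel's definitions.

#include <stdio.h>
#include <stdint.h>

#define TTM_PL_FLAG_VRAM    (1u << 0)	/* stand-in flag values */
#define TTM_PL_FLAG_TT      (1u << 1)
#define TTM_PL_FLAG_SYSTEM  (1u << 2)
#define TTM_PL_FLAG_CACHED  (1u << 3)

static void set_placement_list(uint32_t *pl, unsigned *n,
			       uint32_t type, uint32_t flags)
{
	*n = 0;
	if (type & TTM_PL_FLAG_VRAM)
		pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
	if (type & TTM_PL_FLAG_TT)
		pl[(*n)++] = TTM_PL_FLAG_TT | flags;
	if (type & TTM_PL_FLAG_SYSTEM)
		pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
}

int main(void)
{
	uint32_t placements[3], busy[3];
	unsigned n, nbusy;

	/* Prefer VRAM, but also allow TT when VRAM is contended. */
	set_placement_list(placements, &n, TTM_PL_FLAG_VRAM, TTM_PL_FLAG_CACHED);
	set_placement_list(busy, &nbusy, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_TT,
			   TTM_PL_FLAG_CACHED);

	printf("%u preferred placement(s), %u busy placement(s)\n", n, nbusy);
	return 0;
}
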
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 6dfb425cbae9..1fc57ef58295 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -142,7 +142,6 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
142 GFP_KERNEL); 142 GFP_KERNEL);
143 if (!dev_priv->fifos[channel]) 143 if (!dev_priv->fifos[channel])
144 return -ENOMEM; 144 return -ENOMEM;
145 dev_priv->fifo_alloc_count++;
146 chan = dev_priv->fifos[channel]; 145 chan = dev_priv->fifos[channel];
147 INIT_LIST_HEAD(&chan->nvsw.vbl_wait); 146 INIT_LIST_HEAD(&chan->nvsw.vbl_wait);
148 INIT_LIST_HEAD(&chan->fence.pending); 147 INIT_LIST_HEAD(&chan->fence.pending);
@@ -321,7 +320,6 @@ nouveau_channel_free(struct nouveau_channel *chan)
321 iounmap(chan->user); 320 iounmap(chan->user);
322 321
323 dev_priv->fifos[chan->id] = NULL; 322 dev_priv->fifos[chan->id] = NULL;
324 dev_priv->fifo_alloc_count--;
325 kfree(chan); 323 kfree(chan);
326} 324}
327 325
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 24327f468c4b..14afe1e47e57 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -302,7 +302,7 @@ nouveau_connector_detect(struct drm_connector *connector)
302 302
303detect_analog: 303detect_analog:
304 nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG); 304 nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG);
305 if (!nv_encoder) 305 if (!nv_encoder && !nouveau_tv_disable)
306 nv_encoder = find_encoder_by_type(connector, OUTPUT_TV); 306 nv_encoder = find_encoder_by_type(connector, OUTPUT_TV);
307 if (nv_encoder) { 307 if (nv_encoder) {
308 struct drm_encoder *encoder = to_drm_encoder(nv_encoder); 308 struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index 8ff9ef5d4b47..a251886a0ce6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -137,10 +137,9 @@ nouveau_debugfs_memory_info(struct seq_file *m, void *data)
137{ 137{
138 struct drm_info_node *node = (struct drm_info_node *) m->private; 138 struct drm_info_node *node = (struct drm_info_node *) m->private;
139 struct drm_minor *minor = node->minor; 139 struct drm_minor *minor = node->minor;
140 struct drm_device *dev = minor->dev; 140 struct drm_nouveau_private *dev_priv = minor->dev->dev_private;
141 141
142 seq_printf(m, "VRAM total: %dKiB\n", 142 seq_printf(m, "VRAM total: %dKiB\n", (int)(dev_priv->vram_size >> 10));
143 (int)(nouveau_mem_fb_amount(dev) >> 10));
144 return 0; 143 return 0;
145} 144}
146 145
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index c8482a108a78..65c441a1999f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -190,6 +190,11 @@ nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
190 nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8); 190 nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8);
191 191
192 chan->dma.ib_put = (chan->dma.ib_put + 1) & chan->dma.ib_max; 192 chan->dma.ib_put = (chan->dma.ib_put + 1) & chan->dma.ib_max;
193
194 DRM_MEMORYBARRIER();
195 /* Flush writes. */
196 nouveau_bo_rd32(pb, 0);
197
193 nvchan_wr32(chan, 0x8c, chan->dma.ib_put); 198 nvchan_wr32(chan, 0x8c, chan->dma.ib_put);
194 chan->dma.ib_free--; 199 chan->dma.ib_free--;
195} 200}
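
A schematic of the ordering fix in nv50_dma_push() above: the new indirect-buffer entry must be visible before the put pointer is bumped, hence the barrier plus read-back. nv_barrier(), ring_doorbell() and the plain-memory buffers below are placeholders for the real accessors and BAR mappings; only the ordering pattern is the point.

#include <stdint.h>
#include <stdio.h>

static inline void nv_barrier(void)
{
	__sync_synchronize();	/* full memory barrier (GCC/Clang builtin) */
}

static void ring_doorbell(volatile uint32_t *pushbuf,
			  volatile uint32_t *mmio_put,
			  uint32_t entry_lo, uint32_t entry_hi, uint32_t put)
{
	pushbuf[2 * put + 0] = entry_lo;	/* write the new IB entry ... */
	pushbuf[2 * put + 1] = entry_hi;

	nv_barrier();			/* order the entry before the doorbell */
	(void)pushbuf[0];		/* read back to flush posted writes */

	*mmio_put = put + 1;		/* only now bump the put pointer */
}

int main(void)
{
	uint32_t pushbuf[16] = { 0 };
	uint32_t mmio_put = 0;

	ring_doorbell(pushbuf, &mmio_put, 0x00001000, 0x00000002, 0);
	printf("put pointer now %u\n", mmio_put);
	return 0;
}
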
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index f954ad93e81f..deeb21c6865c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -483,7 +483,7 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
483 ctrl |= (cmd << NV50_AUXCH_CTRL_CMD_SHIFT); 483 ctrl |= (cmd << NV50_AUXCH_CTRL_CMD_SHIFT);
484 ctrl |= ((data_nr - 1) << NV50_AUXCH_CTRL_LEN_SHIFT); 484 ctrl |= ((data_nr - 1) << NV50_AUXCH_CTRL_LEN_SHIFT);
485 485
486 for (;;) { 486 for (i = 0; i < 16; i++) {
487 nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x80000000); 487 nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x80000000);
488 nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl); 488 nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl);
489 nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x00010000); 489 nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x00010000);
@@ -502,6 +502,12 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
502 break; 502 break;
503 } 503 }
504 504
505 if (i == 16) {
506 NV_ERROR(dev, "auxch DEFER too many times, bailing\n");
507 ret = -EREMOTEIO;
508 goto out;
509 }
510
505 if (cmd & 1) { 511 if (cmd & 1) {
506 if ((stat & NV50_AUXCH_STAT_COUNT) != data_nr) { 512 if ((stat & NV50_AUXCH_STAT_COUNT) != data_nr) {
507 ret = -EREMOTEIO; 513 ret = -EREMOTEIO;
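
The AUX-channel change above replaces an unbounded retry loop with a 16-attempt budget before giving up with -EREMOTEIO. A small sketch of the same pattern, with a hypothetical try_aux_transaction() standing in for the hardware transaction:

#include <stdio.h>
#include <errno.h>

enum aux_reply { AUX_ACK, AUX_DEFER };

static enum aux_reply try_aux_transaction(int attempt)
{
	/* Pretend the sink defers the first three attempts. */
	return attempt < 3 ? AUX_DEFER : AUX_ACK;
}

static int aux_transfer(void)
{
	int i;

	for (i = 0; i < 16; i++) {
		if (try_aux_transaction(i) == AUX_ACK)
			break;
	}
	if (i == 16) {
		fprintf(stderr, "auxch DEFER too many times, bailing\n");
		return -EREMOTEIO;
	}
	return 0;
}

int main(void)
{
	return aux_transfer() ? 1 : 0;
}
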
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 30cc09e8a709..1de974acbc65 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -83,6 +83,14 @@ MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration");
83int nouveau_nofbaccel = 0; 83int nouveau_nofbaccel = 0;
84module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400); 84module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);
85 85
86MODULE_PARM_DESC(override_conntype, "Ignore DCB connector type");
87int nouveau_override_conntype = 0;
88module_param_named(override_conntype, nouveau_override_conntype, int, 0400);
89
 90MODULE_PARM_DESC(tv_disable, "Disable TV-out detection");
91int nouveau_tv_disable = 0;
92module_param_named(tv_disable, nouveau_tv_disable, int, 0400);
93
86MODULE_PARM_DESC(tv_norm, "Default TV norm.\n" 94MODULE_PARM_DESC(tv_norm, "Default TV norm.\n"
87 "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n" 95 "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n"
88 "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n" 96 "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n"
@@ -154,9 +162,11 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
154 if (pm_state.event == PM_EVENT_PRETHAW) 162 if (pm_state.event == PM_EVENT_PRETHAW)
155 return 0; 163 return 0;
156 164
165 NV_INFO(dev, "Disabling fbcon acceleration...\n");
157 fbdev_flags = dev_priv->fbdev_info->flags; 166 fbdev_flags = dev_priv->fbdev_info->flags;
158 dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED; 167 dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
159 168
169 NV_INFO(dev, "Unpinning framebuffer(s)...\n");
160 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 170 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
161 struct nouveau_framebuffer *nouveau_fb; 171 struct nouveau_framebuffer *nouveau_fb;
162 172
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 4b9aaf2a8d0f..ace630aa89e1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -76,6 +76,7 @@ struct nouveau_bo {
76 struct ttm_buffer_object bo; 76 struct ttm_buffer_object bo;
77 struct ttm_placement placement; 77 struct ttm_placement placement;
78 u32 placements[3]; 78 u32 placements[3];
79 u32 busy_placements[3];
79 struct ttm_bo_kmap_obj kmap; 80 struct ttm_bo_kmap_obj kmap;
80 struct list_head head; 81 struct list_head head;
81 82
@@ -519,6 +520,7 @@ struct drm_nouveau_private {
519 520
520 struct workqueue_struct *wq; 521 struct workqueue_struct *wq;
521 struct work_struct irq_work; 522 struct work_struct irq_work;
523 struct work_struct hpd_work;
522 524
523 struct list_head vbl_waiting; 525 struct list_head vbl_waiting;
524 526
@@ -533,7 +535,6 @@ struct drm_nouveau_private {
533 535
534 struct fb_info *fbdev_info; 536 struct fb_info *fbdev_info;
535 537
536 int fifo_alloc_count;
537 struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR]; 538 struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR];
538 539
539 struct nouveau_engine engine; 540 struct nouveau_engine engine;
@@ -553,12 +554,6 @@ struct drm_nouveau_private {
553 uint32_t ramro_offset; 554 uint32_t ramro_offset;
554 uint32_t ramro_size; 555 uint32_t ramro_size;
555 556
556 /* base physical addresses */
557 uint64_t fb_phys;
558 uint64_t fb_available_size;
559 uint64_t fb_mappable_pages;
560 uint64_t fb_aper_free;
561
562 struct { 557 struct {
563 enum { 558 enum {
564 NOUVEAU_GART_NONE = 0, 559 NOUVEAU_GART_NONE = 0,
@@ -572,10 +567,6 @@ struct drm_nouveau_private {
572 struct nouveau_gpuobj *sg_ctxdma; 567 struct nouveau_gpuobj *sg_ctxdma;
573 struct page *sg_dummy_page; 568 struct page *sg_dummy_page;
574 dma_addr_t sg_dummy_bus; 569 dma_addr_t sg_dummy_bus;
575
576 /* nottm hack */
577 struct drm_ttm_backend *sg_be;
578 unsigned long sg_handle;
579 } gart_info; 570 } gart_info;
580 571
581 /* nv10-nv40 tiling regions */ 572 /* nv10-nv40 tiling regions */
@@ -584,6 +575,16 @@ struct drm_nouveau_private {
584 spinlock_t lock; 575 spinlock_t lock;
585 } tile; 576 } tile;
586 577
578 /* VRAM/fb configuration */
579 uint64_t vram_size;
580 uint64_t vram_sys_base;
581
582 uint64_t fb_phys;
583 uint64_t fb_available_size;
584 uint64_t fb_mappable_pages;
585 uint64_t fb_aper_free;
586 int fb_mtrr;
587
587 /* G8x/G9x virtual address space */ 588 /* G8x/G9x virtual address space */
588 uint64_t vm_gart_base; 589 uint64_t vm_gart_base;
589 uint64_t vm_gart_size; 590 uint64_t vm_gart_size;
@@ -592,10 +593,6 @@ struct drm_nouveau_private {
592 uint64_t vm_end; 593 uint64_t vm_end;
593 struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR]; 594 struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR];
594 int vm_vram_pt_nr; 595 int vm_vram_pt_nr;
595 uint64_t vram_sys_base;
596
597 /* the mtrr covering the FB */
598 int fb_mtrr;
599 596
600 struct mem_block *ramin_heap; 597 struct mem_block *ramin_heap;
601 598
@@ -614,11 +611,7 @@ struct drm_nouveau_private {
614 uint32_t dac_users[4]; 611 uint32_t dac_users[4];
615 612
616 struct nouveau_suspend_resume { 613 struct nouveau_suspend_resume {
617 uint32_t fifo_mode;
618 uint32_t graph_ctx_control;
619 uint32_t graph_state;
620 uint32_t *ramin_copy; 614 uint32_t *ramin_copy;
621 uint64_t ramin_size;
622 } susres; 615 } susres;
623 616
624 struct backlight_device *backlight; 617 struct backlight_device *backlight;
@@ -681,6 +674,7 @@ extern int nouveau_uscript_tmds;
681extern int nouveau_vram_pushbuf; 674extern int nouveau_vram_pushbuf;
682extern int nouveau_vram_notify; 675extern int nouveau_vram_notify;
683extern int nouveau_fbpercrtc; 676extern int nouveau_fbpercrtc;
677extern int nouveau_tv_disable;
684extern char *nouveau_tv_norm; 678extern char *nouveau_tv_norm;
685extern int nouveau_reg_debug; 679extern int nouveau_reg_debug;
686extern char *nouveau_vbios; 680extern char *nouveau_vbios;
@@ -688,6 +682,7 @@ extern int nouveau_ctxfw;
688extern int nouveau_ignorelid; 682extern int nouveau_ignorelid;
689extern int nouveau_nofbaccel; 683extern int nouveau_nofbaccel;
690extern int nouveau_noaccel; 684extern int nouveau_noaccel;
685extern int nouveau_override_conntype;
691 686
692extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state); 687extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state);
693extern int nouveau_pci_resume(struct pci_dev *pdev); 688extern int nouveau_pci_resume(struct pci_dev *pdev);
@@ -715,7 +710,7 @@ extern struct mem_block *nouveau_mem_alloc_block(struct mem_block *,
715 struct drm_file *, int tail); 710 struct drm_file *, int tail);
716extern void nouveau_mem_takedown(struct mem_block **heap); 711extern void nouveau_mem_takedown(struct mem_block **heap);
717extern void nouveau_mem_free_block(struct mem_block *); 712extern void nouveau_mem_free_block(struct mem_block *);
718extern uint64_t nouveau_mem_fb_amount(struct drm_device *); 713extern int nouveau_mem_detect(struct drm_device *dev);
719extern void nouveau_mem_release(struct drm_file *, struct mem_block *heap); 714extern void nouveau_mem_release(struct drm_file *, struct mem_block *heap);
720extern int nouveau_mem_init(struct drm_device *); 715extern int nouveau_mem_init(struct drm_device *);
721extern int nouveau_mem_init_agp(struct drm_device *); 716extern int nouveau_mem_init_agp(struct drm_device *);
@@ -926,6 +921,10 @@ extern void nv40_fb_takedown(struct drm_device *);
926extern void nv40_fb_set_region_tiling(struct drm_device *, int, uint32_t, 921extern void nv40_fb_set_region_tiling(struct drm_device *, int, uint32_t,
927 uint32_t, uint32_t); 922 uint32_t, uint32_t);
928 923
924/* nv50_fb.c */
925extern int nv50_fb_init(struct drm_device *);
926extern void nv50_fb_takedown(struct drm_device *);
927
929/* nv04_fifo.c */ 928/* nv04_fifo.c */
930extern int nv04_fifo_init(struct drm_device *); 929extern int nv04_fifo_init(struct drm_device *);
931extern void nv04_fifo_disable(struct drm_device *); 930extern void nv04_fifo_disable(struct drm_device *);
@@ -1118,7 +1117,8 @@ extern int nouveau_bo_pin(struct nouveau_bo *, uint32_t flags);
1118extern int nouveau_bo_unpin(struct nouveau_bo *); 1117extern int nouveau_bo_unpin(struct nouveau_bo *);
1119extern int nouveau_bo_map(struct nouveau_bo *); 1118extern int nouveau_bo_map(struct nouveau_bo *);
1120extern void nouveau_bo_unmap(struct nouveau_bo *); 1119extern void nouveau_bo_unmap(struct nouveau_bo *);
1121extern void nouveau_bo_placement_set(struct nouveau_bo *, uint32_t memtype); 1120extern void nouveau_bo_placement_set(struct nouveau_bo *, uint32_t type,
1121 uint32_t busy);
1122extern u16 nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index); 1122extern u16 nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index);
1123extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val); 1123extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val);
1124extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index); 1124extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index);
@@ -1162,6 +1162,10 @@ extern int nouveau_gem_ioctl_info(struct drm_device *, void *,
1162int nv17_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag); 1162int nv17_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
1163int nv17_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state); 1163int nv17_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
1164 1164
1165/* nv50_gpio.c */
1166int nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
1167int nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
1168
1165#ifndef ioread32_native 1169#ifndef ioread32_native
1166#ifdef __BIG_ENDIAN 1170#ifdef __BIG_ENDIAN
1167#define ioread16_native ioread16be 1171#define ioread16_native ioread16be
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index bc4a24029ed1..9f28b94e479b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -47,6 +47,7 @@ struct nouveau_encoder {
47 47
48 union { 48 union {
49 struct { 49 struct {
50 int mc_unknown;
50 int dpcd_version; 51 int dpcd_version;
51 int link_nr; 52 int link_nr;
52 int link_bw; 53 int link_bw;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 68cedd9194fe..8e7dc1d4912a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -30,7 +30,6 @@
30#include <linux/string.h> 30#include <linux/string.h>
31#include <linux/mm.h> 31#include <linux/mm.h>
32#include <linux/tty.h> 32#include <linux/tty.h>
33#include <linux/slab.h>
34#include <linux/sysrq.h> 33#include <linux/sysrq.h>
35#include <linux/delay.h> 34#include <linux/delay.h>
36#include <linux/fb.h> 35#include <linux/fb.h>
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 0d22f66f1c79..1bc0b38a5167 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -180,40 +180,35 @@ nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
180{ 180{
181 struct nouveau_bo *nvbo = gem->driver_private; 181 struct nouveau_bo *nvbo = gem->driver_private;
182 struct ttm_buffer_object *bo = &nvbo->bo; 182 struct ttm_buffer_object *bo = &nvbo->bo;
183 uint64_t flags; 183 uint32_t domains = valid_domains &
184 (write_domains ? write_domains : read_domains);
185 uint32_t pref_flags = 0, valid_flags = 0;
184 186
185 if (!valid_domains || (!read_domains && !write_domains)) 187 if (!domains)
186 return -EINVAL; 188 return -EINVAL;
187 189
188 if (write_domains) { 190 if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
189 if ((valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) && 191 valid_flags |= TTM_PL_FLAG_VRAM;
190 (write_domains & NOUVEAU_GEM_DOMAIN_VRAM)) 192
191 flags = TTM_PL_FLAG_VRAM; 193 if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
192 else 194 valid_flags |= TTM_PL_FLAG_TT;
193 if ((valid_domains & NOUVEAU_GEM_DOMAIN_GART) && 195
194 (write_domains & NOUVEAU_GEM_DOMAIN_GART)) 196 if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
195 flags = TTM_PL_FLAG_TT; 197 bo->mem.mem_type == TTM_PL_VRAM)
196 else 198 pref_flags |= TTM_PL_FLAG_VRAM;
197 return -EINVAL; 199
198 } else { 200 else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
199 if ((valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) && 201 bo->mem.mem_type == TTM_PL_TT)
200 (read_domains & NOUVEAU_GEM_DOMAIN_VRAM) && 202 pref_flags |= TTM_PL_FLAG_TT;
201 bo->mem.mem_type == TTM_PL_VRAM) 203
202 flags = TTM_PL_FLAG_VRAM; 204 else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
203 else 205 pref_flags |= TTM_PL_FLAG_VRAM;
204 if ((valid_domains & NOUVEAU_GEM_DOMAIN_GART) && 206
205 (read_domains & NOUVEAU_GEM_DOMAIN_GART) && 207 else
206 bo->mem.mem_type == TTM_PL_TT) 208 pref_flags |= TTM_PL_FLAG_TT;
207 flags = TTM_PL_FLAG_TT; 209
208 else 210 nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);
209 if ((valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
210 (read_domains & NOUVEAU_GEM_DOMAIN_VRAM))
211 flags = TTM_PL_FLAG_VRAM;
212 else
213 flags = TTM_PL_FLAG_TT;
214 }
215 211
216 nouveau_bo_placement_set(nvbo, flags);
217 return 0; 212 return 0;
218} 213}
219 214
diff --git a/drivers/gpu/drm/nouveau/nouveau_grctx.c b/drivers/gpu/drm/nouveau/nouveau_grctx.c
index c7ebec696747..32f0e495464c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_grctx.c
+++ b/drivers/gpu/drm/nouveau/nouveau_grctx.c
@@ -23,6 +23,7 @@
23 */ 23 */
24 24
25#include <linux/firmware.h> 25#include <linux/firmware.h>
26#include <linux/slab.h>
26 27
27#include "drmP.h" 28#include "drmP.h"
28#include "nouveau_drv.h" 29#include "nouveau_drv.h"
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 95220ddebb45..13e73cee4c44 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -51,6 +51,7 @@ nouveau_irq_preinstall(struct drm_device *dev)
51 51
52 if (dev_priv->card_type == NV_50) { 52 if (dev_priv->card_type == NV_50) {
53 INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh); 53 INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh);
54 INIT_WORK(&dev_priv->hpd_work, nv50_display_irq_hotplug_bh);
54 INIT_LIST_HEAD(&dev_priv->vbl_waiting); 55 INIT_LIST_HEAD(&dev_priv->vbl_waiting);
55 } 56 }
56} 57}
@@ -311,6 +312,31 @@ nouveau_print_bitfield_names_(uint32_t value,
311#define nouveau_print_bitfield_names(val, namelist) \ 312#define nouveau_print_bitfield_names(val, namelist) \
312 nouveau_print_bitfield_names_((val), (namelist), ARRAY_SIZE(namelist)) 313 nouveau_print_bitfield_names_((val), (namelist), ARRAY_SIZE(namelist))
313 314
315struct nouveau_enum_names {
316 uint32_t value;
317 const char *name;
318};
319
320static void
321nouveau_print_enum_names_(uint32_t value,
322 const struct nouveau_enum_names *namelist,
323 const int namelist_len)
324{
325 /*
326 * Caller must have already printed the KERN_* log level for us.
327 * Also the caller is responsible for adding the newline.
328 */
329 int i;
330 for (i = 0; i < namelist_len; ++i) {
331 if (value == namelist[i].value) {
332 printk("%s", namelist[i].name);
333 return;
334 }
335 }
336 printk("unknown value 0x%08x", value);
337}
338#define nouveau_print_enum_names(val, namelist) \
339 nouveau_print_enum_names_((val), (namelist), ARRAY_SIZE(namelist))
314 340
315static int 341static int
316nouveau_graph_chid_from_grctx(struct drm_device *dev) 342nouveau_graph_chid_from_grctx(struct drm_device *dev)
@@ -427,14 +453,16 @@ nouveau_graph_dump_trap_info(struct drm_device *dev, const char *id,
427 struct drm_nouveau_private *dev_priv = dev->dev_private; 453 struct drm_nouveau_private *dev_priv = dev->dev_private;
428 uint32_t nsource = trap->nsource, nstatus = trap->nstatus; 454 uint32_t nsource = trap->nsource, nstatus = trap->nstatus;
429 455
430 NV_INFO(dev, "%s - nSource:", id); 456 if (dev_priv->card_type < NV_50) {
431 nouveau_print_bitfield_names(nsource, nsource_names); 457 NV_INFO(dev, "%s - nSource:", id);
432 printk(", nStatus:"); 458 nouveau_print_bitfield_names(nsource, nsource_names);
433 if (dev_priv->card_type < NV_10) 459 printk(", nStatus:");
434 nouveau_print_bitfield_names(nstatus, nstatus_names); 460 if (dev_priv->card_type < NV_10)
435 else 461 nouveau_print_bitfield_names(nstatus, nstatus_names);
436 nouveau_print_bitfield_names(nstatus, nstatus_names_nv10); 462 else
437 printk("\n"); 463 nouveau_print_bitfield_names(nstatus, nstatus_names_nv10);
464 printk("\n");
465 }
438 466
439 NV_INFO(dev, "%s - Ch %d/%d Class 0x%04x Mthd 0x%04x " 467 NV_INFO(dev, "%s - Ch %d/%d Class 0x%04x Mthd 0x%04x "
440 "Data 0x%08x:0x%08x\n", 468 "Data 0x%08x:0x%08x\n",
@@ -578,27 +606,502 @@ nouveau_pgraph_irq_handler(struct drm_device *dev)
578} 606}
579 607
580static void 608static void
609nv50_pfb_vm_trap(struct drm_device *dev, int display, const char *name)
610{
611 struct drm_nouveau_private *dev_priv = dev->dev_private;
612 uint32_t trap[6];
613 int i, ch;
614 uint32_t idx = nv_rd32(dev, 0x100c90);
615 if (idx & 0x80000000) {
616 idx &= 0xffffff;
617 if (display) {
618 for (i = 0; i < 6; i++) {
619 nv_wr32(dev, 0x100c90, idx | i << 24);
620 trap[i] = nv_rd32(dev, 0x100c94);
621 }
622 for (ch = 0; ch < dev_priv->engine.fifo.channels; ch++) {
623 struct nouveau_channel *chan = dev_priv->fifos[ch];
624
625 if (!chan || !chan->ramin)
626 continue;
627
628 if (trap[1] == chan->ramin->instance >> 12)
629 break;
630 }
631 NV_INFO(dev, "%s - VM: Trapped %s at %02x%04x%04x status %08x %08x channel %d\n",
632 name, (trap[5]&0x100?"read":"write"),
633 trap[5]&0xff, trap[4]&0xffff,
634 trap[3]&0xffff, trap[0], trap[2], ch);
635 }
636 nv_wr32(dev, 0x100c90, idx | 0x80000000);
637 } else if (display) {
638 NV_INFO(dev, "%s - no VM fault?\n", name);
639 }
640}
641
642static struct nouveau_enum_names nv50_mp_exec_error_names[] =
643{
644 { 3, "STACK_UNDERFLOW" },
645 { 4, "QUADON_ACTIVE" },
646 { 8, "TIMEOUT" },
647 { 0x10, "INVALID_OPCODE" },
648 { 0x40, "BREAKPOINT" },
649};
650
651static void
652nv50_pgraph_mp_trap(struct drm_device *dev, int tpid, int display)
653{
654 struct drm_nouveau_private *dev_priv = dev->dev_private;
655 uint32_t units = nv_rd32(dev, 0x1540);
656 uint32_t addr, mp10, status, pc, oplow, ophigh;
657 int i;
658 int mps = 0;
659 for (i = 0; i < 4; i++) {
660 if (!(units & 1 << (i+24)))
661 continue;
662 if (dev_priv->chipset < 0xa0)
663 addr = 0x408200 + (tpid << 12) + (i << 7);
664 else
665 addr = 0x408100 + (tpid << 11) + (i << 7);
666 mp10 = nv_rd32(dev, addr + 0x10);
667 status = nv_rd32(dev, addr + 0x14);
668 if (!status)
669 continue;
670 if (display) {
671 nv_rd32(dev, addr + 0x20);
672 pc = nv_rd32(dev, addr + 0x24);
673 oplow = nv_rd32(dev, addr + 0x70);
 674 ophigh = nv_rd32(dev, addr + 0x74);
675 NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - "
676 "TP %d MP %d: ", tpid, i);
677 nouveau_print_enum_names(status,
678 nv50_mp_exec_error_names);
679 printk(" at %06x warp %d, opcode %08x %08x\n",
680 pc&0xffffff, pc >> 24,
681 oplow, ophigh);
682 }
683 nv_wr32(dev, addr + 0x10, mp10);
684 nv_wr32(dev, addr + 0x14, 0);
685 mps++;
686 }
687 if (!mps && display)
688 NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - TP %d: "
689 "No MPs claiming errors?\n", tpid);
690}
691
692static void
693nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
694 uint32_t ustatus_new, int display, const char *name)
695{
696 struct drm_nouveau_private *dev_priv = dev->dev_private;
697 int tps = 0;
698 uint32_t units = nv_rd32(dev, 0x1540);
699 int i, r;
700 uint32_t ustatus_addr, ustatus;
701 for (i = 0; i < 16; i++) {
702 if (!(units & (1 << i)))
703 continue;
704 if (dev_priv->chipset < 0xa0)
705 ustatus_addr = ustatus_old + (i << 12);
706 else
707 ustatus_addr = ustatus_new + (i << 11);
708 ustatus = nv_rd32(dev, ustatus_addr) & 0x7fffffff;
709 if (!ustatus)
710 continue;
711 tps++;
712 switch (type) {
713 case 6: /* texture error... unknown for now */
714 nv50_pfb_vm_trap(dev, display, name);
715 if (display) {
716 NV_ERROR(dev, "magic set %d:\n", i);
717 for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
718 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
719 nv_rd32(dev, r));
720 }
721 break;
722 case 7: /* MP error */
723 if (ustatus & 0x00010000) {
724 nv50_pgraph_mp_trap(dev, i, display);
725 ustatus &= ~0x00010000;
726 }
727 break;
728 case 8: /* TPDMA error */
729 {
730 uint32_t e0c = nv_rd32(dev, ustatus_addr + 4);
731 uint32_t e10 = nv_rd32(dev, ustatus_addr + 8);
732 uint32_t e14 = nv_rd32(dev, ustatus_addr + 0xc);
733 uint32_t e18 = nv_rd32(dev, ustatus_addr + 0x10);
734 uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14);
735 uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18);
736 uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c);
737 nv50_pfb_vm_trap(dev, display, name);
738 /* 2d engine destination */
739 if (ustatus & 0x00000010) {
740 if (display) {
741 NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n",
742 i, e14, e10);
743 NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
744 i, e0c, e18, e1c, e20, e24);
745 }
746 ustatus &= ~0x00000010;
747 }
748 /* Render target */
749 if (ustatus & 0x00000040) {
750 if (display) {
751 NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n",
752 i, e14, e10);
753 NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
754 i, e0c, e18, e1c, e20, e24);
755 }
756 ustatus &= ~0x00000040;
757 }
758 /* CUDA memory: l[], g[] or stack. */
759 if (ustatus & 0x00000080) {
760 if (display) {
761 if (e18 & 0x80000000) {
762 /* g[] read fault? */
763 NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n",
764 i, e14, e10 | ((e18 >> 24) & 0x1f));
765 e18 &= ~0x1f000000;
766 } else if (e18 & 0xc) {
767 /* g[] write fault? */
768 NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n",
769 i, e14, e10 | ((e18 >> 7) & 0x1f));
770 e18 &= ~0x00000f80;
771 } else {
772 NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n",
773 i, e14, e10);
774 }
775 NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
776 i, e0c, e18, e1c, e20, e24);
777 }
778 ustatus &= ~0x00000080;
779 }
780 }
781 break;
782 }
783 if (ustatus) {
784 if (display)
785 NV_INFO(dev, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
786 }
787 nv_wr32(dev, ustatus_addr, 0xc0000000);
788 }
789
790 if (!tps && display)
791 NV_INFO(dev, "%s - No TPs claiming errors?\n", name);
792}
793
794static void
795nv50_pgraph_trap_handler(struct drm_device *dev)
796{
797 struct nouveau_pgraph_trap trap;
798 uint32_t status = nv_rd32(dev, 0x400108);
799 uint32_t ustatus;
800 int display = nouveau_ratelimit();
801
802
803 if (!status && display) {
804 nouveau_graph_trap_info(dev, &trap);
805 nouveau_graph_dump_trap_info(dev, "PGRAPH_TRAP", &trap);
806 NV_INFO(dev, "PGRAPH_TRAP - no units reporting traps?\n");
807 }
808
809 /* DISPATCH: Relays commands to other units and handles NOTIFY,
810 * COND, QUERY. If you get a trap from it, the command is still stuck
811 * in DISPATCH and you need to do something about it. */
812 if (status & 0x001) {
813 ustatus = nv_rd32(dev, 0x400804) & 0x7fffffff;
814 if (!ustatus && display) {
815 NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - no ustatus?\n");
816 }
817
818 /* Known to be triggered by screwed up NOTIFY and COND... */
819 if (ustatus & 0x00000001) {
820 nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_FAULT");
821 nv_wr32(dev, 0x400500, 0);
822 if (nv_rd32(dev, 0x400808) & 0x80000000) {
823 if (display) {
824 if (nouveau_graph_trapped_channel(dev, &trap.channel))
825 trap.channel = -1;
826 trap.class = nv_rd32(dev, 0x400814);
827 trap.mthd = nv_rd32(dev, 0x400808) & 0x1ffc;
828 trap.subc = (nv_rd32(dev, 0x400808) >> 16) & 0x7;
829 trap.data = nv_rd32(dev, 0x40080c);
830 trap.data2 = nv_rd32(dev, 0x400810);
831 nouveau_graph_dump_trap_info(dev,
832 "PGRAPH_TRAP_DISPATCH_FAULT", &trap);
833 NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400808: %08x\n", nv_rd32(dev, 0x400808));
834 NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400848: %08x\n", nv_rd32(dev, 0x400848));
835 }
836 nv_wr32(dev, 0x400808, 0);
837 } else if (display) {
838 NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - No stuck command?\n");
839 }
840 nv_wr32(dev, 0x4008e8, nv_rd32(dev, 0x4008e8) & 3);
841 nv_wr32(dev, 0x400848, 0);
842 ustatus &= ~0x00000001;
843 }
844 if (ustatus & 0x00000002) {
845 nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_QUERY");
846 nv_wr32(dev, 0x400500, 0);
847 if (nv_rd32(dev, 0x40084c) & 0x80000000) {
848 if (display) {
849 if (nouveau_graph_trapped_channel(dev, &trap.channel))
850 trap.channel = -1;
851 trap.class = nv_rd32(dev, 0x400814);
852 trap.mthd = nv_rd32(dev, 0x40084c) & 0x1ffc;
853 trap.subc = (nv_rd32(dev, 0x40084c) >> 16) & 0x7;
854 trap.data = nv_rd32(dev, 0x40085c);
855 trap.data2 = 0;
856 nouveau_graph_dump_trap_info(dev,
857 "PGRAPH_TRAP_DISPATCH_QUERY", &trap);
858 NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - 40084c: %08x\n", nv_rd32(dev, 0x40084c));
859 }
860 nv_wr32(dev, 0x40084c, 0);
861 } else if (display) {
862 NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - No stuck command?\n");
863 }
864 ustatus &= ~0x00000002;
865 }
866 if (ustatus && display)
867 NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - Unhandled ustatus 0x%08x\n", ustatus);
868 nv_wr32(dev, 0x400804, 0xc0000000);
869 nv_wr32(dev, 0x400108, 0x001);
870 status &= ~0x001;
871 }
872
873 /* TRAPs other than dispatch use the "normal" trap regs. */
874 if (status && display) {
875 nouveau_graph_trap_info(dev, &trap);
876 nouveau_graph_dump_trap_info(dev,
877 "PGRAPH_TRAP", &trap);
878 }
879
880 /* M2MF: Memory to memory copy engine. */
881 if (status & 0x002) {
882 ustatus = nv_rd32(dev, 0x406800) & 0x7fffffff;
883 if (!ustatus && display) {
884 NV_INFO(dev, "PGRAPH_TRAP_M2MF - no ustatus?\n");
885 }
886 if (ustatus & 0x00000001) {
887 nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_NOTIFY");
888 ustatus &= ~0x00000001;
889 }
890 if (ustatus & 0x00000002) {
891 nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_IN");
892 ustatus &= ~0x00000002;
893 }
894 if (ustatus & 0x00000004) {
895 nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_OUT");
896 ustatus &= ~0x00000004;
897 }
898 NV_INFO (dev, "PGRAPH_TRAP_M2MF - %08x %08x %08x %08x\n",
899 nv_rd32(dev, 0x406804),
900 nv_rd32(dev, 0x406808),
901 nv_rd32(dev, 0x40680c),
902 nv_rd32(dev, 0x406810));
903 if (ustatus && display)
904 NV_INFO(dev, "PGRAPH_TRAP_M2MF - Unhandled ustatus 0x%08x\n", ustatus);
905 /* No sane way found yet -- just reset the bugger. */
906 nv_wr32(dev, 0x400040, 2);
907 nv_wr32(dev, 0x400040, 0);
908 nv_wr32(dev, 0x406800, 0xc0000000);
909 nv_wr32(dev, 0x400108, 0x002);
910 status &= ~0x002;
911 }
912
913 /* VFETCH: Fetches data from vertex buffers. */
914 if (status & 0x004) {
915 ustatus = nv_rd32(dev, 0x400c04) & 0x7fffffff;
916 if (!ustatus && display) {
917 NV_INFO(dev, "PGRAPH_TRAP_VFETCH - no ustatus?\n");
918 }
919 if (ustatus & 0x00000001) {
920 nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_VFETCH_FAULT");
921 NV_INFO (dev, "PGRAPH_TRAP_VFETCH_FAULT - %08x %08x %08x %08x\n",
922 nv_rd32(dev, 0x400c00),
923 nv_rd32(dev, 0x400c08),
924 nv_rd32(dev, 0x400c0c),
925 nv_rd32(dev, 0x400c10));
926 ustatus &= ~0x00000001;
927 }
928 if (ustatus && display)
929 NV_INFO(dev, "PGRAPH_TRAP_VFETCH - Unhandled ustatus 0x%08x\n", ustatus);
930 nv_wr32(dev, 0x400c04, 0xc0000000);
931 nv_wr32(dev, 0x400108, 0x004);
932 status &= ~0x004;
933 }
934
935 /* STRMOUT: DirectX streamout / OpenGL transform feedback. */
936 if (status & 0x008) {
937 ustatus = nv_rd32(dev, 0x401800) & 0x7fffffff;
938 if (!ustatus && display) {
939 NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - no ustatus?\n");
940 }
941 if (ustatus & 0x00000001) {
942 nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_STRMOUT_FAULT");
943 NV_INFO (dev, "PGRAPH_TRAP_STRMOUT_FAULT - %08x %08x %08x %08x\n",
944 nv_rd32(dev, 0x401804),
945 nv_rd32(dev, 0x401808),
946 nv_rd32(dev, 0x40180c),
947 nv_rd32(dev, 0x401810));
948 ustatus &= ~0x00000001;
949 }
950 if (ustatus && display)
951 NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - Unhandled ustatus 0x%08x\n", ustatus);
952 /* No sane way found yet -- just reset the bugger. */
953 nv_wr32(dev, 0x400040, 0x80);
954 nv_wr32(dev, 0x400040, 0);
955 nv_wr32(dev, 0x401800, 0xc0000000);
956 nv_wr32(dev, 0x400108, 0x008);
957 status &= ~0x008;
958 }
959
960 /* CCACHE: Handles code and c[] caches and fills them. */
961 if (status & 0x010) {
962 ustatus = nv_rd32(dev, 0x405018) & 0x7fffffff;
963 if (!ustatus && display) {
964 NV_INFO(dev, "PGRAPH_TRAP_CCACHE - no ustatus?\n");
965 }
966 if (ustatus & 0x00000001) {
967 nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_CCACHE_FAULT");
968 NV_INFO (dev, "PGRAPH_TRAP_CCACHE_FAULT - %08x %08x %08x %08x %08x %08x %08x\n",
969 nv_rd32(dev, 0x405800),
970 nv_rd32(dev, 0x405804),
971 nv_rd32(dev, 0x405808),
972 nv_rd32(dev, 0x40580c),
973 nv_rd32(dev, 0x405810),
974 nv_rd32(dev, 0x405814),
975 nv_rd32(dev, 0x40581c));
976 ustatus &= ~0x00000001;
977 }
978 if (ustatus && display)
979 NV_INFO(dev, "PGRAPH_TRAP_CCACHE - Unhandled ustatus 0x%08x\n", ustatus);
980 nv_wr32(dev, 0x405018, 0xc0000000);
981 nv_wr32(dev, 0x400108, 0x010);
982 status &= ~0x010;
983 }
984
985 /* Unknown, not seen yet... 0x402000 is the only trap status reg
986 * remaining, so try to handle it anyway. Perhaps related to that
987 * unknown DMA slot on tesla? */
988 if (status & 0x20) {
989 nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_UNKC04");
990 ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff;
991 if (display)
992 NV_INFO(dev, "PGRAPH_TRAP_UNKC04 - Unhandled ustatus 0x%08x\n", ustatus);
993 nv_wr32(dev, 0x402000, 0xc0000000);
 994 /* no status modification on purpose */
995 }
996
997 /* TEXTURE: CUDA texturing units */
998 if (status & 0x040) {
999 nv50_pgraph_tp_trap (dev, 6, 0x408900, 0x408600, display,
1000 "PGRAPH_TRAP_TEXTURE");
1001 nv_wr32(dev, 0x400108, 0x040);
1002 status &= ~0x040;
1003 }
1004
1005 /* MP: CUDA execution engines. */
1006 if (status & 0x080) {
1007 nv50_pgraph_tp_trap (dev, 7, 0x408314, 0x40831c, display,
1008 "PGRAPH_TRAP_MP");
1009 nv_wr32(dev, 0x400108, 0x080);
1010 status &= ~0x080;
1011 }
1012
1013 /* TPDMA: Handles TP-initiated uncached memory accesses:
1014 * l[], g[], stack, 2d surfaces, render targets. */
1015 if (status & 0x100) {
1016 nv50_pgraph_tp_trap (dev, 8, 0x408e08, 0x408708, display,
1017 "PGRAPH_TRAP_TPDMA");
1018 nv_wr32(dev, 0x400108, 0x100);
1019 status &= ~0x100;
1020 }
1021
1022 if (status) {
1023 if (display)
1024 NV_INFO(dev, "PGRAPH_TRAP - Unknown trap 0x%08x\n",
1025 status);
1026 nv_wr32(dev, 0x400108, status);
1027 }
1028}
1029
1030/* There must be a *lot* of these. Will take some time to gather them up. */
1031static struct nouveau_enum_names nv50_data_error_names[] =
1032{
1033 { 4, "INVALID_VALUE" },
1034 { 5, "INVALID_ENUM" },
1035 { 8, "INVALID_OBJECT" },
1036 { 0xc, "INVALID_BITFIELD" },
1037 { 0x28, "MP_NO_REG_SPACE" },
1038 { 0x2b, "MP_BLOCK_SIZE_MISMATCH" },
1039};
1040
1041static void
581nv50_pgraph_irq_handler(struct drm_device *dev) 1042nv50_pgraph_irq_handler(struct drm_device *dev)
582{ 1043{
1044 struct nouveau_pgraph_trap trap;
1045 int unhandled = 0;
583 uint32_t status; 1046 uint32_t status;
584 1047
585 while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) { 1048 while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
 586 uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE); 1049 /* NOTIFY: You've set a NOTIFY on a command and it's done. */
587
588 if (status & 0x00000001) { 1050 if (status & 0x00000001) {
589 nouveau_pgraph_intr_notify(dev, nsource); 1051 nouveau_graph_trap_info(dev, &trap);
1052 if (nouveau_ratelimit())
1053 nouveau_graph_dump_trap_info(dev,
1054 "PGRAPH_NOTIFY", &trap);
590 status &= ~0x00000001; 1055 status &= ~0x00000001;
591 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001); 1056 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001);
592 } 1057 }
593 1058
594 if (status & 0x00000010) { 1059 /* COMPUTE_QUERY: Purpose and exact cause unknown, happens
595 nouveau_pgraph_intr_error(dev, nsource | 1060 * when you write 0x200 to 0x50c0 method 0x31c. */
596 NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD); 1061 if (status & 0x00000002) {
1062 nouveau_graph_trap_info(dev, &trap);
1063 if (nouveau_ratelimit())
1064 nouveau_graph_dump_trap_info(dev,
1065 "PGRAPH_COMPUTE_QUERY", &trap);
1066 status &= ~0x00000002;
1067 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000002);
1068 }
597 1069
1070 /* Unknown, never seen: 0x4 */
1071
1072 /* ILLEGAL_MTHD: You used a wrong method for this class. */
1073 if (status & 0x00000010) {
1074 nouveau_graph_trap_info(dev, &trap);
1075 if (nouveau_pgraph_intr_swmthd(dev, &trap))
1076 unhandled = 1;
1077 if (unhandled && nouveau_ratelimit())
1078 nouveau_graph_dump_trap_info(dev,
1079 "PGRAPH_ILLEGAL_MTHD", &trap);
598 status &= ~0x00000010; 1080 status &= ~0x00000010;
599 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010); 1081 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010);
600 } 1082 }
601 1083
1084 /* ILLEGAL_CLASS: You used a wrong class. */
1085 if (status & 0x00000020) {
1086 nouveau_graph_trap_info(dev, &trap);
1087 if (nouveau_ratelimit())
1088 nouveau_graph_dump_trap_info(dev,
1089 "PGRAPH_ILLEGAL_CLASS", &trap);
1090 status &= ~0x00000020;
1091 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000020);
1092 }
1093
1094 /* DOUBLE_NOTIFY: You tried to set a NOTIFY on another NOTIFY. */
1095 if (status & 0x00000040) {
1096 nouveau_graph_trap_info(dev, &trap);
1097 if (nouveau_ratelimit())
1098 nouveau_graph_dump_trap_info(dev,
1099 "PGRAPH_DOUBLE_NOTIFY", &trap);
1100 status &= ~0x00000040;
1101 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000040);
1102 }
1103
1104 /* CONTEXT_SWITCH: PGRAPH needs us to load a new context */
602 if (status & 0x00001000) { 1105 if (status & 0x00001000) {
603 nv_wr32(dev, 0x400500, 0x00000000); 1106 nv_wr32(dev, 0x400500, 0x00000000);
604 nv_wr32(dev, NV03_PGRAPH_INTR, 1107 nv_wr32(dev, NV03_PGRAPH_INTR,
@@ -613,49 +1116,59 @@ nv50_pgraph_irq_handler(struct drm_device *dev)
613 status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; 1116 status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
614 } 1117 }
615 1118
616 if (status & 0x00100000) { 1119 /* BUFFER_NOTIFY: Your m2mf transfer finished */
617 nouveau_pgraph_intr_error(dev, nsource | 1120 if (status & 0x00010000) {
618 NV03_PGRAPH_NSOURCE_DATA_ERROR); 1121 nouveau_graph_trap_info(dev, &trap);
1122 if (nouveau_ratelimit())
1123 nouveau_graph_dump_trap_info(dev,
1124 "PGRAPH_BUFFER_NOTIFY", &trap);
1125 status &= ~0x00010000;
1126 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00010000);
1127 }
619 1128
1129 /* DATA_ERROR: Invalid value for this method, or invalid
1130 * state in current PGRAPH context for this operation */
1131 if (status & 0x00100000) {
1132 nouveau_graph_trap_info(dev, &trap);
1133 if (nouveau_ratelimit()) {
1134 nouveau_graph_dump_trap_info(dev,
1135 "PGRAPH_DATA_ERROR", &trap);
1136 NV_INFO (dev, "PGRAPH_DATA_ERROR - ");
1137 nouveau_print_enum_names(nv_rd32(dev, 0x400110),
1138 nv50_data_error_names);
1139 printk("\n");
1140 }
620 status &= ~0x00100000; 1141 status &= ~0x00100000;
621 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000); 1142 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000);
622 } 1143 }
623 1144
1145 /* TRAP: Something bad happened in the middle of command
1146 * execution. Has a billion types, subtypes, and even
1147 * subsubtypes. */
624 if (status & 0x00200000) { 1148 if (status & 0x00200000) {
625 int r; 1149 nv50_pgraph_trap_handler(dev);
626
627 nouveau_pgraph_intr_error(dev, nsource |
628 NV03_PGRAPH_NSOURCE_PROTECTION_ERROR);
629
630 NV_ERROR(dev, "magic set 1:\n");
631 for (r = 0x408900; r <= 0x408910; r += 4)
632 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
633 nv_rd32(dev, r));
634 nv_wr32(dev, 0x408900,
635 nv_rd32(dev, 0x408904) | 0xc0000000);
636 for (r = 0x408e08; r <= 0x408e24; r += 4)
637 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
638 nv_rd32(dev, r));
639 nv_wr32(dev, 0x408e08,
640 nv_rd32(dev, 0x408e08) | 0xc0000000);
641
642 NV_ERROR(dev, "magic set 2:\n");
643 for (r = 0x409900; r <= 0x409910; r += 4)
644 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
645 nv_rd32(dev, r));
646 nv_wr32(dev, 0x409900,
647 nv_rd32(dev, 0x409904) | 0xc0000000);
648 for (r = 0x409e08; r <= 0x409e24; r += 4)
649 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
650 nv_rd32(dev, r));
651 nv_wr32(dev, 0x409e08,
652 nv_rd32(dev, 0x409e08) | 0xc0000000);
653
654 status &= ~0x00200000; 1150 status &= ~0x00200000;
655 nv_wr32(dev, NV03_PGRAPH_NSOURCE, nsource);
656 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000); 1151 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000);
657 } 1152 }
658 1153
1154 /* Unknown, never seen: 0x00400000 */
1155
1156 /* SINGLE_STEP: Happens on every method if you turned on
1157 * single stepping in 40008c */
1158 if (status & 0x01000000) {
1159 nouveau_graph_trap_info(dev, &trap);
1160 if (nouveau_ratelimit())
1161 nouveau_graph_dump_trap_info(dev,
1162 "PGRAPH_SINGLE_STEP", &trap);
1163 status &= ~0x01000000;
1164 nv_wr32(dev, NV03_PGRAPH_INTR, 0x01000000);
1165 }
1166
1167 /* 0x02000000 happens when you pause a ctxprog...
1168 * but the only way this can happen that I know is by
1169 * poking the relevant MMIO register, and we don't
1170 * do that. */
1171
659 if (status) { 1172 if (status) {
660 NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", 1173 NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n",
661 status); 1174 status);
@@ -672,7 +1185,8 @@ nv50_pgraph_irq_handler(struct drm_device *dev)
672 } 1185 }
673 1186
674 nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING); 1187 nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
675 nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31)); 1188 if (nv_rd32(dev, 0x400824) & (1 << 31))
1189 nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
676} 1190}
677 1191
678static void 1192static void
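
A minimal sketch of the enum-name lookup the new nouveau_print_enum_names() performs when decoding PGRAPH error codes; the table is a subset of nv50_data_error_names from the patch, and printf() stands in for printk().

#include <stdio.h>
#include <stdint.h>

struct enum_name {
	uint32_t value;
	const char *name;
};

static const struct enum_name data_errors[] = {
	{ 4,   "INVALID_VALUE" },
	{ 5,   "INVALID_ENUM" },
	{ 8,   "INVALID_OBJECT" },
	{ 0xc, "INVALID_BITFIELD" },
};

static void print_enum_name(uint32_t value,
			    const struct enum_name *tbl, int len)
{
	int i;

	for (i = 0; i < len; i++) {
		if (tbl[i].value == value) {
			printf("%s", tbl[i].name);
			return;
		}
	}
	printf("unknown value 0x%08x", value);
}

int main(void)
{
	print_enum_name(5, data_errors,
			(int)(sizeof(data_errors) / sizeof(data_errors[0])));
	putchar('\n');
	return 0;
}
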
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 2dc09dbd817d..775a7017af64 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -347,6 +347,20 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
347 return -EBUSY; 347 return -EBUSY;
348 } 348 }
349 349
350 nv_wr32(dev, 0x100c80, 0x00040001);
351 if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
352 NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
353 NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
354 return -EBUSY;
355 }
356
357 nv_wr32(dev, 0x100c80, 0x00060001);
358 if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
359 NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
360 NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
361 return -EBUSY;
362 }
363
350 return 0; 364 return 0;
351} 365}
352 366
@@ -387,6 +401,20 @@ nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
387 if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { 401 if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
388 NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); 402 NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
389 NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); 403 NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
404 return;
405 }
406
407 nv_wr32(dev, 0x100c80, 0x00040001);
408 if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
409 NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
410 NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
411 return;
412 }
413
414 nv_wr32(dev, 0x100c80, 0x00060001);
415 if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
416 NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
417 NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
390 } 418 }
391} 419}
392 420
@@ -449,9 +477,30 @@ void nouveau_mem_close(struct drm_device *dev)
449 } 477 }
450} 478}
451 479
452/*XXX won't work on BSD because of pci_read_config_dword */
453static uint32_t 480static uint32_t
454nouveau_mem_fb_amount_igp(struct drm_device *dev) 481nouveau_mem_detect_nv04(struct drm_device *dev)
482{
483 uint32_t boot0 = nv_rd32(dev, NV03_BOOT_0);
484
485 if (boot0 & 0x00000100)
486 return (((boot0 >> 12) & 0xf) * 2 + 2) * 1024 * 1024;
487
488 switch (boot0 & NV03_BOOT_0_RAM_AMOUNT) {
489 case NV04_BOOT_0_RAM_AMOUNT_32MB:
490 return 32 * 1024 * 1024;
491 case NV04_BOOT_0_RAM_AMOUNT_16MB:
492 return 16 * 1024 * 1024;
493 case NV04_BOOT_0_RAM_AMOUNT_8MB:
494 return 8 * 1024 * 1024;
495 case NV04_BOOT_0_RAM_AMOUNT_4MB:
496 return 4 * 1024 * 1024;
497 }
498
499 return 0;
500}
501
502static uint32_t
503nouveau_mem_detect_nforce(struct drm_device *dev)
455{ 504{
456 struct drm_nouveau_private *dev_priv = dev->dev_private; 505 struct drm_nouveau_private *dev_priv = dev->dev_private;
457 struct pci_dev *bridge; 506 struct pci_dev *bridge;
@@ -463,11 +512,11 @@ nouveau_mem_fb_amount_igp(struct drm_device *dev)
463 return 0; 512 return 0;
464 } 513 }
465 514
466 if (dev_priv->flags&NV_NFORCE) { 515 if (dev_priv->flags & NV_NFORCE) {
467 pci_read_config_dword(bridge, 0x7C, &mem); 516 pci_read_config_dword(bridge, 0x7C, &mem);
468 return (uint64_t)(((mem >> 6) & 31) + 1)*1024*1024; 517 return (uint64_t)(((mem >> 6) & 31) + 1)*1024*1024;
469 } else 518 } else
470 if (dev_priv->flags&NV_NFORCE2) { 519 if (dev_priv->flags & NV_NFORCE2) {
471 pci_read_config_dword(bridge, 0x84, &mem); 520 pci_read_config_dword(bridge, 0x84, &mem);
472 return (uint64_t)(((mem >> 4) & 127) + 1)*1024*1024; 521 return (uint64_t)(((mem >> 4) & 127) + 1)*1024*1024;
473 } 522 }
@@ -477,50 +526,32 @@ nouveau_mem_fb_amount_igp(struct drm_device *dev)
477} 526}
478 527
479/* returns the amount of FB ram in bytes */ 528/* returns the amount of FB ram in bytes */
480uint64_t nouveau_mem_fb_amount(struct drm_device *dev) 529int
530nouveau_mem_detect(struct drm_device *dev)
481{ 531{
482 struct drm_nouveau_private *dev_priv = dev->dev_private; 532 struct drm_nouveau_private *dev_priv = dev->dev_private;
483 uint32_t boot0; 533
484 534 if (dev_priv->card_type == NV_04) {
485 switch (dev_priv->card_type) { 535 dev_priv->vram_size = nouveau_mem_detect_nv04(dev);
486 case NV_04: 536 } else
487 boot0 = nv_rd32(dev, NV03_BOOT_0); 537 if (dev_priv->flags & (NV_NFORCE | NV_NFORCE2)) {
488 if (boot0 & 0x00000100) 538 dev_priv->vram_size = nouveau_mem_detect_nforce(dev);
489 return (((boot0 >> 12) & 0xf) * 2 + 2) * 1024 * 1024; 539 } else {
490 540 dev_priv->vram_size = nv_rd32(dev, NV04_FIFO_DATA);
491 switch (boot0 & NV03_BOOT_0_RAM_AMOUNT) { 541 dev_priv->vram_size &= NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK;
492 case NV04_BOOT_0_RAM_AMOUNT_32MB: 542 if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac)
493 return 32 * 1024 * 1024; 543 dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10) << 12;
494 case NV04_BOOT_0_RAM_AMOUNT_16MB:
495 return 16 * 1024 * 1024;
496 case NV04_BOOT_0_RAM_AMOUNT_8MB:
497 return 8 * 1024 * 1024;
498 case NV04_BOOT_0_RAM_AMOUNT_4MB:
499 return 4 * 1024 * 1024;
500 }
501 break;
502 case NV_10:
503 case NV_20:
504 case NV_30:
505 case NV_40:
506 case NV_50:
507 default:
508 if (dev_priv->flags & (NV_NFORCE | NV_NFORCE2)) {
509 return nouveau_mem_fb_amount_igp(dev);
510 } else {
511 uint64_t mem;
512 mem = (nv_rd32(dev, NV04_FIFO_DATA) &
513 NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK) >>
514 NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT;
515 return mem * 1024 * 1024;
516 }
517 break;
518 } 544 }
519 545
520 NV_ERROR(dev, 546 NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20));
521 "Unable to detect video ram size. Please report your setup to " 547 if (dev_priv->vram_sys_base) {
522 DRIVER_EMAIL "\n"); 548 NV_INFO(dev, "Stolen system memory at: 0x%010llx\n",
523 return 0; 549 dev_priv->vram_sys_base);
550 }
551
552 if (dev_priv->vram_size)
553 return 0;
554 return -ENOMEM;
524} 555}
525 556
526#if __OS_HAS_AGP 557#if __OS_HAS_AGP
@@ -631,15 +662,12 @@ nouveau_mem_init(struct drm_device *dev)
631 spin_lock_init(&dev_priv->ttm.bo_list_lock); 662 spin_lock_init(&dev_priv->ttm.bo_list_lock);
632 spin_lock_init(&dev_priv->tile.lock); 663 spin_lock_init(&dev_priv->tile.lock);
633 664
634 dev_priv->fb_available_size = nouveau_mem_fb_amount(dev); 665 dev_priv->fb_available_size = dev_priv->vram_size;
635
636 dev_priv->fb_mappable_pages = dev_priv->fb_available_size; 666 dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
637 if (dev_priv->fb_mappable_pages > drm_get_resource_len(dev, 1)) 667 if (dev_priv->fb_mappable_pages > drm_get_resource_len(dev, 1))
638 dev_priv->fb_mappable_pages = drm_get_resource_len(dev, 1); 668 dev_priv->fb_mappable_pages = drm_get_resource_len(dev, 1);
639 dev_priv->fb_mappable_pages >>= PAGE_SHIFT; 669 dev_priv->fb_mappable_pages >>= PAGE_SHIFT;
640 670
641 NV_INFO(dev, "%d MiB VRAM\n", (int)(dev_priv->fb_available_size >> 20));
642
643 /* remove reserved space at end of vram from available amount */ 671 /* remove reserved space at end of vram from available amount */
644 dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram; 672 dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram;
645 dev_priv->fb_aper_free = dev_priv->fb_available_size; 673 dev_priv->fb_aper_free = dev_priv->fb_available_size;
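
For reference, the BOOT_0 decode that nouveau_mem_detect_nv04() now carries can be checked in isolation. The following is a minimal user-space sketch, not part of the patch; the register value is made up and only the "extended" encoding from the hunk above (bit 8 set, size = (bits 15:12 * 2 + 2) MiB) is exercised.

#include <stdint.h>
#include <stdio.h>

/* Standalone copy of the extended BOOT_0 encoding used above; the legacy
 * two-bit RAM_AMOUNT codes are left out of this sketch. */
static uint32_t nv04_ram_from_boot0(uint32_t boot0)
{
	if (boot0 & 0x00000100)
		return (((boot0 >> 12) & 0xf) * 2 + 2) * 1024 * 1024;
	return 0;
}

int main(void)
{
	uint32_t boot0 = 0x00007100;	/* made-up register value, field = 7 */

	/* 7 * 2 + 2 = 16, so this prints "16 MiB". */
	printf("%u MiB\n", nv04_ram_from_boot0(boot0) >> 20);
	return 0;
}
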
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index ed1590577b6c..1d6ee8b55154 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -1,6 +1,7 @@
1#include "drmP.h" 1#include "drmP.h"
2#include "nouveau_drv.h" 2#include "nouveau_drv.h"
3#include <linux/pagemap.h> 3#include <linux/pagemap.h>
4#include <linux/slab.h>
4 5
5#define NV_CTXDMA_PAGE_SHIFT 12 6#define NV_CTXDMA_PAGE_SHIFT 12
6#define NV_CTXDMA_PAGE_SIZE (1 << NV_CTXDMA_PAGE_SHIFT) 7#define NV_CTXDMA_PAGE_SIZE (1 << NV_CTXDMA_PAGE_SHIFT)
@@ -171,6 +172,24 @@ nouveau_sgdma_unbind(struct ttm_backend *be)
171 } 172 }
172 dev_priv->engine.instmem.finish_access(nvbe->dev); 173 dev_priv->engine.instmem.finish_access(nvbe->dev);
173 174
175 if (dev_priv->card_type == NV_50) {
176 nv_wr32(dev, 0x100c80, 0x00050001);
177 if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
178 NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
179 NV_ERROR(dev, "0x100c80 = 0x%08x\n",
180 nv_rd32(dev, 0x100c80));
181 return -EBUSY;
182 }
183
184 nv_wr32(dev, 0x100c80, 0x00000001);
185 if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
 186 NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (3)\n");
187 NV_ERROR(dev, "0x100c80 = 0x%08x\n",
188 nv_rd32(dev, 0x100c80));
189 return -EBUSY;
190 }
191 }
192
174 nvbe->bound = false; 193 nvbe->bound = false;
175 return 0; 194 return 0;
176} 195}
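
The two register writes added to nouveau_sgdma_unbind() above follow the usual write-then-poll pattern: kick a flush via 0x100c80, wait for bit 0 to clear, and give up with -EBUSY on timeout. Below is a rough user-space sketch of that pattern only, with a fake register in place of nv_rd32()/nv_wr32() and an arbitrary poll bound rather than nouveau's real nv_wait().

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_100c80;	/* stand-in for the MMIO register */

/* Bounded poll until (reg & mask) == 0, mirroring the unbind path above. */
static bool wait_bit_clear(volatile uint32_t *reg, uint32_t mask)
{
	int i;

	for (i = 0; i < 100000; i++) {
		if (!(*reg & mask))
			return true;
	}
	return false;	/* caller would log the timeout and return -EBUSY */
}

int main(void)
{
	fake_100c80 = 0x00050001;	/* pretend the flush was just kicked */
	fake_100c80 &= ~1u;		/* pretend the hardware finished */
	printf("%s\n", wait_bit_clear(&fake_100c80, 1) ? "flushed" : "timeout");
	return 0;
}
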
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index eb8f084d5f53..e1710640a278 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -24,6 +24,7 @@
24 */ 24 */
25 25
26#include <linux/swab.h> 26#include <linux/swab.h>
27#include <linux/slab.h>
27#include "drmP.h" 28#include "drmP.h"
28#include "drm.h" 29#include "drm.h"
29#include "drm_sarea.h" 30#include "drm_sarea.h"
@@ -35,7 +36,6 @@
35#include "nouveau_drm.h" 36#include "nouveau_drm.h"
36#include "nv50_display.h" 37#include "nv50_display.h"
37 38
38static int nouveau_stub_init(struct drm_device *dev) { return 0; }
39static void nouveau_stub_takedown(struct drm_device *dev) {} 39static void nouveau_stub_takedown(struct drm_device *dev) {}
40 40
41static int nouveau_init_engine_ptrs(struct drm_device *dev) 41static int nouveau_init_engine_ptrs(struct drm_device *dev)
@@ -277,8 +277,8 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
277 engine->timer.init = nv04_timer_init; 277 engine->timer.init = nv04_timer_init;
278 engine->timer.read = nv04_timer_read; 278 engine->timer.read = nv04_timer_read;
279 engine->timer.takedown = nv04_timer_takedown; 279 engine->timer.takedown = nv04_timer_takedown;
280 engine->fb.init = nouveau_stub_init; 280 engine->fb.init = nv50_fb_init;
281 engine->fb.takedown = nouveau_stub_takedown; 281 engine->fb.takedown = nv50_fb_takedown;
282 engine->graph.grclass = nv50_graph_grclass; 282 engine->graph.grclass = nv50_graph_grclass;
283 engine->graph.init = nv50_graph_init; 283 engine->graph.init = nv50_graph_init;
284 engine->graph.takedown = nv50_graph_takedown; 284 engine->graph.takedown = nv50_graph_takedown;
@@ -341,7 +341,7 @@ nouveau_card_init_channel(struct drm_device *dev)
341 341
342 gpuobj = NULL; 342 gpuobj = NULL;
343 ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY, 343 ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY,
344 0, nouveau_mem_fb_amount(dev), 344 0, dev_priv->vram_size,
345 NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM, 345 NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM,
346 &gpuobj); 346 &gpuobj);
347 if (ret) 347 if (ret)
@@ -427,6 +427,10 @@ nouveau_card_init(struct drm_device *dev)
427 goto out; 427 goto out;
428 } 428 }
429 429
430 ret = nouveau_mem_detect(dev);
431 if (ret)
432 goto out_bios;
433
430 ret = nouveau_gpuobj_early_init(dev); 434 ret = nouveau_gpuobj_early_init(dev);
431 if (ret) 435 if (ret)
432 goto out_bios; 436 goto out_bios;
@@ -502,7 +506,7 @@ nouveau_card_init(struct drm_device *dev)
502 else 506 else
503 ret = nv04_display_create(dev); 507 ret = nv04_display_create(dev);
504 if (ret) 508 if (ret)
505 goto out_irq; 509 goto out_channel;
506 } 510 }
507 511
508 ret = nouveau_backlight_init(dev); 512 ret = nouveau_backlight_init(dev);
@@ -516,6 +520,11 @@ nouveau_card_init(struct drm_device *dev)
516 520
517 return 0; 521 return 0;
518 522
523out_channel:
524 if (dev_priv->channel) {
525 nouveau_channel_free(dev_priv->channel);
526 dev_priv->channel = NULL;
527 }
519out_irq: 528out_irq:
520 drm_irq_uninstall(dev); 529 drm_irq_uninstall(dev);
521out_fifo: 530out_fifo:
@@ -533,6 +542,7 @@ out_mc:
533out_gpuobj: 542out_gpuobj:
534 nouveau_gpuobj_takedown(dev); 543 nouveau_gpuobj_takedown(dev);
535out_mem: 544out_mem:
545 nouveau_sgdma_takedown(dev);
536 nouveau_mem_close(dev); 546 nouveau_mem_close(dev);
537out_instmem: 547out_instmem:
538 engine->instmem.takedown(dev); 548 engine->instmem.takedown(dev);
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index a1d1ebb073d9..eba687f1099e 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -230,9 +230,9 @@ nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
230 struct drm_framebuffer *fb = crtc->fb; 230 struct drm_framebuffer *fb = crtc->fb;
231 231
232 /* Calculate our timings */ 232 /* Calculate our timings */
233 int horizDisplay = (mode->crtc_hdisplay >> 3) - 1; 233 int horizDisplay = (mode->crtc_hdisplay >> 3) - 1;
234 int horizStart = (mode->crtc_hsync_start >> 3) - 1; 234 int horizStart = (mode->crtc_hsync_start >> 3) + 1;
235 int horizEnd = (mode->crtc_hsync_end >> 3) - 1; 235 int horizEnd = (mode->crtc_hsync_end >> 3) + 1;
236 int horizTotal = (mode->crtc_htotal >> 3) - 5; 236 int horizTotal = (mode->crtc_htotal >> 3) - 5;
237 int horizBlankStart = (mode->crtc_hdisplay >> 3) - 1; 237 int horizBlankStart = (mode->crtc_hdisplay >> 3) - 1;
238 int horizBlankEnd = (mode->crtc_htotal >> 3) - 1; 238 int horizBlankEnd = (mode->crtc_htotal >> 3) - 1;
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 3da90c2c4e63..813b25cec726 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -118,8 +118,8 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
118 return; 118 return;
119 } 119 }
120 120
121 width = ALIGN(image->width, 32); 121 width = ALIGN(image->width, 8);
122 dsize = (width * image->height) >> 5; 122 dsize = ALIGN(width * image->height, 32) >> 5;
123 123
124 if (info->fix.visual == FB_VISUAL_TRUECOLOR || 124 if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
125 info->fix.visual == FB_VISUAL_DIRECTCOLOR) { 125 info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
@@ -136,8 +136,8 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
136 ((image->dx + image->width) & 0xffff)); 136 ((image->dx + image->width) & 0xffff));
137 OUT_RING(chan, bg); 137 OUT_RING(chan, bg);
138 OUT_RING(chan, fg); 138 OUT_RING(chan, fg);
139 OUT_RING(chan, (image->height << 16) | image->width);
140 OUT_RING(chan, (image->height << 16) | width); 139 OUT_RING(chan, (image->height << 16) | width);
140 OUT_RING(chan, (image->height << 16) | image->width);
141 OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff)); 141 OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff));
142 142
143 while (dsize) { 143 while (dsize) {
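
The imageblit change above tightens how much 1bpp glyph data gets pushed: width is now padded to 8 pixels instead of 32, and only the total bit count is rounded up to whole 32-bit words. A quick arithmetic check, not part of the patch, using a made-up 12x22 font cell (ALIGN_UP stands in for the kernel's ALIGN macro):

#include <stdint.h>
#include <stdio.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	uint32_t w = 12, h = 22;	/* hypothetical glyph size */

	uint32_t old_width = ALIGN_UP(w, 32);			/* 32 */
	uint32_t old_dsize = (old_width * h) >> 5;		/* 22 words */
	uint32_t new_width = ALIGN_UP(w, 8);			/* 16 */
	uint32_t new_dsize = ALIGN_UP(new_width * h, 32) >> 5;	/* 11 words */

	/* The new math halves the data sent for this glyph. */
	printf("old %u words, new %u words\n", old_dsize, new_dsize);
	return 0;
}
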
diff --git a/drivers/gpu/drm/nouveau/nv40_fifo.c b/drivers/gpu/drm/nouveau/nv40_fifo.c
index 6b2ef4a9fce1..500ccfd3a0b8 100644
--- a/drivers/gpu/drm/nouveau/nv40_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv40_fifo.c
@@ -278,7 +278,7 @@ nv40_fifo_init_ramxx(struct drm_device *dev)
278 default: 278 default:
279 nv_wr32(dev, 0x2230, 0); 279 nv_wr32(dev, 0x2230, 0);
280 nv_wr32(dev, NV40_PFIFO_RAMFC, 280 nv_wr32(dev, NV40_PFIFO_RAMFC,
281 ((nouveau_mem_fb_amount(dev) - 512 * 1024 + 281 ((dev_priv->vram_size - 512 * 1024 +
282 dev_priv->ramfc_offset) >> 16) | (3 << 16)); 282 dev_priv->ramfc_offset) >> 16) | (3 << 16));
283 break; 283 break;
284 } 284 }
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index 53e8afe1dcd1..0616c96e4b67 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -335,6 +335,27 @@ nv40_graph_init(struct drm_device *dev)
335 nv_wr32(dev, 0x400b38, 0x2ffff800); 335 nv_wr32(dev, 0x400b38, 0x2ffff800);
336 nv_wr32(dev, 0x400b3c, 0x00006000); 336 nv_wr32(dev, 0x400b3c, 0x00006000);
337 337
338 /* Tiling related stuff. */
339 switch (dev_priv->chipset) {
340 case 0x44:
341 case 0x4a:
342 nv_wr32(dev, 0x400bc4, 0x1003d888);
343 nv_wr32(dev, 0x400bbc, 0xb7a7b500);
344 break;
345 case 0x46:
346 nv_wr32(dev, 0x400bc4, 0x0000e024);
347 nv_wr32(dev, 0x400bbc, 0xb7a7b520);
348 break;
349 case 0x4c:
350 case 0x4e:
351 case 0x67:
352 nv_wr32(dev, 0x400bc4, 0x1003d888);
353 nv_wr32(dev, 0x400bbc, 0xb7a7b540);
354 break;
355 default:
356 break;
357 }
358
338 /* Turn all the tiling regions off. */ 359 /* Turn all the tiling regions off. */
339 for (i = 0; i < pfb->num_tiles; i++) 360 for (i = 0; i < pfb->num_tiles; i++)
340 nv40_graph_set_region_tiling(dev, i, 0, 0, 0); 361 nv40_graph_set_region_tiling(dev, i, 0, 0, 0);
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 61a89f2dc553..649db4c1b690 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -143,7 +143,7 @@ nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pchan)
143 } 143 }
144 144
145 ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoVRAM, 0, 0x19, 145 ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoVRAM, 0, 0x19,
146 0, nouveau_mem_fb_amount(dev)); 146 0, dev_priv->vram_size);
147 if (ret) { 147 if (ret) {
148 nv50_evo_channel_del(pchan); 148 nv50_evo_channel_del(pchan);
149 return ret; 149 return ret;
@@ -231,7 +231,7 @@ nv50_display_init(struct drm_device *dev)
231 /* This used to be in crtc unblank, but seems out of place there. */ 231 /* This used to be in crtc unblank, but seems out of place there. */
232 nv_wr32(dev, NV50_PDISPLAY_UNK_380, 0); 232 nv_wr32(dev, NV50_PDISPLAY_UNK_380, 0);
233 /* RAM is clamped to 256 MiB. */ 233 /* RAM is clamped to 256 MiB. */
234 ram_amount = nouveau_mem_fb_amount(dev); 234 ram_amount = dev_priv->vram_size;
235 NV_DEBUG_KMS(dev, "ram_amount %d\n", ram_amount); 235 NV_DEBUG_KMS(dev, "ram_amount %d\n", ram_amount);
236 if (ram_amount > 256*1024*1024) 236 if (ram_amount > 256*1024*1024)
237 ram_amount = 256*1024*1024; 237 ram_amount = 256*1024*1024;
@@ -522,15 +522,17 @@ int nv50_display_create(struct drm_device *dev)
522 } 522 }
523 523
524 for (i = 0 ; i < dcb->connector.entries; i++) { 524 for (i = 0 ; i < dcb->connector.entries; i++) {
525 if (i != 0 && dcb->connector.entry[i].index == 525 if (i != 0 && dcb->connector.entry[i].index2 ==
526 dcb->connector.entry[i - 1].index) 526 dcb->connector.entry[i - 1].index2)
527 continue; 527 continue;
528 nouveau_connector_create(dev, &dcb->connector.entry[i]); 528 nouveau_connector_create(dev, &dcb->connector.entry[i]);
529 } 529 }
530 530
531 ret = nv50_display_init(dev); 531 ret = nv50_display_init(dev);
532 if (ret) 532 if (ret) {
533 nv50_display_destroy(dev);
533 return ret; 534 return ret;
535 }
534 536
535 return 0; 537 return 0;
536} 538}
@@ -885,10 +887,12 @@ nv50_display_error_handler(struct drm_device *dev)
885 nv_wr32(dev, NV50_PDISPLAY_TRAPPED_ADDR, 0x90000000); 887 nv_wr32(dev, NV50_PDISPLAY_TRAPPED_ADDR, 0x90000000);
886} 888}
887 889
888static void 890void
889nv50_display_irq_hotplug(struct drm_device *dev) 891nv50_display_irq_hotplug_bh(struct work_struct *work)
890{ 892{
891 struct drm_nouveau_private *dev_priv = dev->dev_private; 893 struct drm_nouveau_private *dev_priv =
894 container_of(work, struct drm_nouveau_private, hpd_work);
895 struct drm_device *dev = dev_priv->dev;
892 struct drm_connector *connector; 896 struct drm_connector *connector;
893 const uint32_t gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 }; 897 const uint32_t gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
894 uint32_t unplug_mask, plug_mask, change_mask; 898 uint32_t unplug_mask, plug_mask, change_mask;
@@ -949,8 +953,10 @@ nv50_display_irq_handler(struct drm_device *dev)
949 struct drm_nouveau_private *dev_priv = dev->dev_private; 953 struct drm_nouveau_private *dev_priv = dev->dev_private;
950 uint32_t delayed = 0; 954 uint32_t delayed = 0;
951 955
952 while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_HOTPLUG) 956 if (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_HOTPLUG) {
953 nv50_display_irq_hotplug(dev); 957 if (!work_pending(&dev_priv->hpd_work))
958 queue_work(dev_priv->wq, &dev_priv->hpd_work);
959 }
954 960
955 while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) { 961 while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) {
956 uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0); 962 uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0);
diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h
index 3ae8d0725f63..581d405ac014 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.h
+++ b/drivers/gpu/drm/nouveau/nv50_display.h
@@ -37,6 +37,7 @@
37 37
38void nv50_display_irq_handler(struct drm_device *dev); 38void nv50_display_irq_handler(struct drm_device *dev);
39void nv50_display_irq_handler_bh(struct work_struct *work); 39void nv50_display_irq_handler_bh(struct work_struct *work);
40void nv50_display_irq_hotplug_bh(struct work_struct *work);
40int nv50_display_init(struct drm_device *dev); 41int nv50_display_init(struct drm_device *dev);
41int nv50_display_create(struct drm_device *dev); 42int nv50_display_create(struct drm_device *dev);
42int nv50_display_destroy(struct drm_device *dev); 43int nv50_display_destroy(struct drm_device *dev);
diff --git a/drivers/gpu/drm/nouveau/nv50_fb.c b/drivers/gpu/drm/nouveau/nv50_fb.c
new file mode 100644
index 000000000000..a95e6941ba88
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_fb.c
@@ -0,0 +1,32 @@
1#include "drmP.h"
2#include "drm.h"
3#include "nouveau_drv.h"
4#include "nouveau_drm.h"
5
6int
7nv50_fb_init(struct drm_device *dev)
8{
9 /* This is needed to get meaningful information from 100c90
10 * on traps. No idea what these values mean exactly. */
11 struct drm_nouveau_private *dev_priv = dev->dev_private;
12
13 switch (dev_priv->chipset) {
14 case 0x50:
15 nv_wr32(dev, 0x100c90, 0x0707ff);
16 break;
17 case 0xa5:
18 case 0xa8:
19 nv_wr32(dev, 0x100c90, 0x0d0fff);
20 break;
21 default:
22 nv_wr32(dev, 0x100c90, 0x1d07ff);
23 break;
24 }
25
26 return 0;
27}
28
29void
30nv50_fb_takedown(struct drm_device *dev)
31{
32}
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index 993c7126fbde..a8c70e7e9184 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -157,8 +157,11 @@ nv50_fbcon_accel_init(struct fb_info *info)
157 struct drm_nouveau_private *dev_priv = dev->dev_private; 157 struct drm_nouveau_private *dev_priv = dev->dev_private;
158 struct nouveau_channel *chan = dev_priv->channel; 158 struct nouveau_channel *chan = dev_priv->channel;
159 struct nouveau_gpuobj *eng2d = NULL; 159 struct nouveau_gpuobj *eng2d = NULL;
160 uint64_t fb;
160 int ret, format; 161 int ret, format;
161 162
163 fb = info->fix.smem_start - dev_priv->fb_phys + dev_priv->vm_vram_base;
164
162 switch (info->var.bits_per_pixel) { 165 switch (info->var.bits_per_pixel) {
163 case 8: 166 case 8:
164 format = 0xf3; 167 format = 0xf3;
@@ -233,7 +236,7 @@ nv50_fbcon_accel_init(struct fb_info *info)
233 BEGIN_RING(chan, NvSub2D, 0x0808, 3); 236 BEGIN_RING(chan, NvSub2D, 0x0808, 3);
234 OUT_RING(chan, 0); 237 OUT_RING(chan, 0);
235 OUT_RING(chan, 0); 238 OUT_RING(chan, 0);
236 OUT_RING(chan, 0); 239 OUT_RING(chan, 1);
237 BEGIN_RING(chan, NvSub2D, 0x081c, 1); 240 BEGIN_RING(chan, NvSub2D, 0x081c, 1);
238 OUT_RING(chan, 1); 241 OUT_RING(chan, 1);
239 BEGIN_RING(chan, NvSub2D, 0x0840, 4); 242 BEGIN_RING(chan, NvSub2D, 0x0840, 4);
@@ -248,9 +251,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
248 OUT_RING(chan, info->fix.line_length); 251 OUT_RING(chan, info->fix.line_length);
249 OUT_RING(chan, info->var.xres_virtual); 252 OUT_RING(chan, info->var.xres_virtual);
250 OUT_RING(chan, info->var.yres_virtual); 253 OUT_RING(chan, info->var.yres_virtual);
251 OUT_RING(chan, 0); 254 OUT_RING(chan, upper_32_bits(fb));
252 OUT_RING(chan, info->fix.smem_start - dev_priv->fb_phys + 255 OUT_RING(chan, lower_32_bits(fb));
253 dev_priv->vm_vram_base);
254 BEGIN_RING(chan, NvSub2D, 0x0230, 2); 256 BEGIN_RING(chan, NvSub2D, 0x0230, 2);
255 OUT_RING(chan, format); 257 OUT_RING(chan, format);
256 OUT_RING(chan, 1); 258 OUT_RING(chan, 1);
@@ -258,9 +260,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
258 OUT_RING(chan, info->fix.line_length); 260 OUT_RING(chan, info->fix.line_length);
259 OUT_RING(chan, info->var.xres_virtual); 261 OUT_RING(chan, info->var.xres_virtual);
260 OUT_RING(chan, info->var.yres_virtual); 262 OUT_RING(chan, info->var.yres_virtual);
261 OUT_RING(chan, 0); 263 OUT_RING(chan, upper_32_bits(fb));
262 OUT_RING(chan, info->fix.smem_start - dev_priv->fb_phys + 264 OUT_RING(chan, lower_32_bits(fb));
263 dev_priv->vm_vram_base);
264 265
265 return 0; 266 return 0;
266} 267}
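
The accel_init change above computes the framebuffer's VM address once as a 64-bit value and feeds it to the 2D object as separate high and low words, where the old code always wrote 0 for the high word. A small sketch of the split, with made-up addresses standing in for the real smem_start/fb_phys/vm_vram_base values and the upper/lower_32_bits helpers open-coded:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t smem_start   = 0xd0000000ULL;	/* hypothetical BAR address */
	uint64_t fb_phys      = 0xc0000000ULL;	/* hypothetical VRAM base   */
	uint64_t vm_vram_base = 0x100000000ULL;	/* hypothetical VM window   */
	uint64_t fb = smem_start - fb_phys + vm_vram_base;

	/* The ring takes the address high word first, then low word. */
	printf("hi 0x%08x lo 0x%08x\n",
	       (uint32_t)(fb >> 32), (uint32_t)(fb & 0xffffffffULL));
	return 0;	/* prints: hi 0x00000001 lo 0x10000000 */
}
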
diff --git a/drivers/gpu/drm/nouveau/nv50_gpio.c b/drivers/gpu/drm/nouveau/nv50_gpio.c
new file mode 100644
index 000000000000..c61782b314e7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_gpio.c
@@ -0,0 +1,76 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26#include "nouveau_drv.h"
27#include "nouveau_hw.h"
28
29static int
30nv50_gpio_location(struct dcb_gpio_entry *gpio, uint32_t *reg, uint32_t *shift)
31{
32 const uint32_t nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
33
 34 if (gpio->line >= 32)
35 return -EINVAL;
36
37 *reg = nv50_gpio_reg[gpio->line >> 3];
38 *shift = (gpio->line & 7) << 2;
39 return 0;
40}
41
42int
43nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag)
44{
45 struct dcb_gpio_entry *gpio;
46 uint32_t r, s, v;
47
48 gpio = nouveau_bios_gpio_entry(dev, tag);
49 if (!gpio)
50 return -ENOENT;
51
52 if (nv50_gpio_location(gpio, &r, &s))
53 return -EINVAL;
54
55 v = nv_rd32(dev, r) >> (s + 2);
56 return ((v & 1) == (gpio->state[1] & 1));
57}
58
59int
60nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state)
61{
62 struct dcb_gpio_entry *gpio;
63 uint32_t r, s, v;
64
65 gpio = nouveau_bios_gpio_entry(dev, tag);
66 if (!gpio)
67 return -ENOENT;
68
69 if (nv50_gpio_location(gpio, &r, &s))
70 return -EINVAL;
71
72 v = nv_rd32(dev, r) & ~(0x3 << s);
73 v |= (gpio->state[state] ^ 2) << s;
74 nv_wr32(dev, r, v);
75 return 0;
76}
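
nv50_gpio_location() above packs eight GPIO lines per register, with four bits of state per line. A worked example of that mapping for one sample line, purely illustrative:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	static const uint32_t gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
	unsigned int line = 13;			/* hypothetical DCB GPIO line */

	uint32_t reg   = gpio_reg[line >> 3];	/* 8 lines per register    */
	uint32_t shift = (line & 7) << 2;	/* 4 bits of state per line */

	printf("line %u -> reg 0x%04x, shift %u\n", line, reg, shift);
	return 0;	/* prints: line 13 -> reg 0xe108, shift 20 */
}
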
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index 857a09671a39..b203d06f601f 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -56,6 +56,10 @@ nv50_graph_init_intr(struct drm_device *dev)
56static void 56static void
57nv50_graph_init_regs__nv(struct drm_device *dev) 57nv50_graph_init_regs__nv(struct drm_device *dev)
58{ 58{
59 struct drm_nouveau_private *dev_priv = dev->dev_private;
60 uint32_t units = nv_rd32(dev, 0x1540);
61 int i;
62
59 NV_DEBUG(dev, "\n"); 63 NV_DEBUG(dev, "\n");
60 64
61 nv_wr32(dev, 0x400804, 0xc0000000); 65 nv_wr32(dev, 0x400804, 0xc0000000);
@@ -65,6 +69,20 @@ nv50_graph_init_regs__nv(struct drm_device *dev)
65 nv_wr32(dev, 0x405018, 0xc0000000); 69 nv_wr32(dev, 0x405018, 0xc0000000);
66 nv_wr32(dev, 0x402000, 0xc0000000); 70 nv_wr32(dev, 0x402000, 0xc0000000);
67 71
72 for (i = 0; i < 16; i++) {
73 if (units & 1 << i) {
74 if (dev_priv->chipset < 0xa0) {
75 nv_wr32(dev, 0x408900 + (i << 12), 0xc0000000);
76 nv_wr32(dev, 0x408e08 + (i << 12), 0xc0000000);
77 nv_wr32(dev, 0x408314 + (i << 12), 0xc0000000);
78 } else {
79 nv_wr32(dev, 0x408600 + (i << 11), 0xc0000000);
80 nv_wr32(dev, 0x408708 + (i << 11), 0xc0000000);
81 nv_wr32(dev, 0x40831c + (i << 11), 0xc0000000);
82 }
83 }
84 }
85
68 nv_wr32(dev, 0x400108, 0xffffffff); 86 nv_wr32(dev, 0x400108, 0xffffffff);
69 87
70 nv_wr32(dev, 0x400824, 0x00004000); 88 nv_wr32(dev, 0x400824, 0x00004000);
@@ -229,10 +247,6 @@ nv50_graph_create_context(struct nouveau_channel *chan)
229 nouveau_grctx_vals_load(dev, ctx); 247 nouveau_grctx_vals_load(dev, ctx);
230 } 248 }
231 nv_wo32(dev, ctx, 0x00000/4, chan->ramin->instance >> 12); 249 nv_wo32(dev, ctx, 0x00000/4, chan->ramin->instance >> 12);
232 if ((dev_priv->chipset & 0xf0) == 0xa0)
233 nv_wo32(dev, ctx, 0x00004/4, 0x00000000);
234 else
235 nv_wo32(dev, ctx, 0x0011c/4, 0x00000000);
236 dev_priv->engine.instmem.finish_access(dev); 250 dev_priv->engine.instmem.finish_access(dev);
237 251
238 return 0; 252 return 0;
@@ -396,9 +410,10 @@ struct nouveau_pgraph_object_class nv50_graph_grclass[] = {
396 { 0x5039, false, NULL }, /* m2mf */ 410 { 0x5039, false, NULL }, /* m2mf */
397 { 0x502d, false, NULL }, /* 2d */ 411 { 0x502d, false, NULL }, /* 2d */
398 { 0x50c0, false, NULL }, /* compute */ 412 { 0x50c0, false, NULL }, /* compute */
413 { 0x85c0, false, NULL }, /* compute (nva3, nva5, nva8) */
399 { 0x5097, false, NULL }, /* tesla (nv50) */ 414 { 0x5097, false, NULL }, /* tesla (nv50) */
400 { 0x8297, false, NULL }, /* tesla (nv80/nv90) */ 415 { 0x8297, false, NULL }, /* tesla (nv8x/nv9x) */
401 { 0x8397, false, NULL }, /* tesla (nva0) */ 416 { 0x8397, false, NULL }, /* tesla (nva0, nvaa, nvac) */
402 { 0x8597, false, NULL }, /* tesla (nva8) */ 417 { 0x8597, false, NULL }, /* tesla (nva3, nva5, nva8) */
403 {} 418 {}
404}; 419};
diff --git a/drivers/gpu/drm/nouveau/nv50_grctx.c b/drivers/gpu/drm/nouveau/nv50_grctx.c
index d105fcd42ca0..42a8fb20c1e6 100644
--- a/drivers/gpu/drm/nouveau/nv50_grctx.c
+++ b/drivers/gpu/drm/nouveau/nv50_grctx.c
@@ -55,15 +55,18 @@
55#define CP_FLAG_AUTO_LOAD ((2 * 32) + 5) 55#define CP_FLAG_AUTO_LOAD ((2 * 32) + 5)
56#define CP_FLAG_AUTO_LOAD_NOT_PENDING 0 56#define CP_FLAG_AUTO_LOAD_NOT_PENDING 0
57#define CP_FLAG_AUTO_LOAD_PENDING 1 57#define CP_FLAG_AUTO_LOAD_PENDING 1
58#define CP_FLAG_NEWCTX ((2 * 32) + 10)
59#define CP_FLAG_NEWCTX_BUSY 0
60#define CP_FLAG_NEWCTX_DONE 1
58#define CP_FLAG_XFER ((2 * 32) + 11) 61#define CP_FLAG_XFER ((2 * 32) + 11)
59#define CP_FLAG_XFER_IDLE 0 62#define CP_FLAG_XFER_IDLE 0
60#define CP_FLAG_XFER_BUSY 1 63#define CP_FLAG_XFER_BUSY 1
61#define CP_FLAG_NEWCTX ((2 * 32) + 12)
62#define CP_FLAG_NEWCTX_BUSY 0
63#define CP_FLAG_NEWCTX_DONE 1
64#define CP_FLAG_ALWAYS ((2 * 32) + 13) 64#define CP_FLAG_ALWAYS ((2 * 32) + 13)
65#define CP_FLAG_ALWAYS_FALSE 0 65#define CP_FLAG_ALWAYS_FALSE 0
66#define CP_FLAG_ALWAYS_TRUE 1 66#define CP_FLAG_ALWAYS_TRUE 1
67#define CP_FLAG_INTR ((2 * 32) + 15)
68#define CP_FLAG_INTR_NOT_PENDING 0
69#define CP_FLAG_INTR_PENDING 1
67 70
68#define CP_CTX 0x00100000 71#define CP_CTX 0x00100000
69#define CP_CTX_COUNT 0x000f0000 72#define CP_CTX_COUNT 0x000f0000
@@ -174,6 +177,7 @@ nv50_grctx_init(struct nouveau_grctx *ctx)
174 case 0x96: 177 case 0x96:
175 case 0x98: 178 case 0x98:
176 case 0xa0: 179 case 0xa0:
180 case 0xa3:
177 case 0xa5: 181 case 0xa5:
178 case 0xa8: 182 case 0xa8:
179 case 0xaa: 183 case 0xaa:
@@ -214,6 +218,8 @@ nv50_grctx_init(struct nouveau_grctx *ctx)
214 cp_name(ctx, cp_setup_save); 218 cp_name(ctx, cp_setup_save);
215 cp_set (ctx, UNK1D, SET); 219 cp_set (ctx, UNK1D, SET);
216 cp_wait(ctx, STATUS, BUSY); 220 cp_wait(ctx, STATUS, BUSY);
221 cp_wait(ctx, INTR, PENDING);
222 cp_bra (ctx, STATUS, BUSY, cp_setup_save);
217 cp_set (ctx, UNK01, SET); 223 cp_set (ctx, UNK01, SET);
218 cp_set (ctx, SWAP_DIRECTION, SAVE); 224 cp_set (ctx, SWAP_DIRECTION, SAVE);
219 225
@@ -269,7 +275,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
269 int offset, base; 275 int offset, base;
270 uint32_t units = nv_rd32 (ctx->dev, 0x1540); 276 uint32_t units = nv_rd32 (ctx->dev, 0x1540);
271 277
272 /* 0800 */ 278 /* 0800: DISPATCH */
273 cp_ctx(ctx, 0x400808, 7); 279 cp_ctx(ctx, 0x400808, 7);
274 gr_def(ctx, 0x400814, 0x00000030); 280 gr_def(ctx, 0x400814, 0x00000030);
275 cp_ctx(ctx, 0x400834, 0x32); 281 cp_ctx(ctx, 0x400834, 0x32);
@@ -300,7 +306,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
300 gr_def(ctx, 0x400b20, 0x0001629d); 306 gr_def(ctx, 0x400b20, 0x0001629d);
301 } 307 }
302 308
303 /* 0C00 */ 309 /* 0C00: VFETCH */
304 cp_ctx(ctx, 0x400c08, 0x2); 310 cp_ctx(ctx, 0x400c08, 0x2);
305 gr_def(ctx, 0x400c08, 0x0000fe0c); 311 gr_def(ctx, 0x400c08, 0x0000fe0c);
306 312
@@ -326,7 +332,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
326 cp_ctx(ctx, 0x401540, 0x5); 332 cp_ctx(ctx, 0x401540, 0x5);
327 gr_def(ctx, 0x401550, 0x00001018); 333 gr_def(ctx, 0x401550, 0x00001018);
328 334
329 /* 1800 */ 335 /* 1800: STREAMOUT */
330 cp_ctx(ctx, 0x401814, 0x1); 336 cp_ctx(ctx, 0x401814, 0x1);
331 gr_def(ctx, 0x401814, 0x000000ff); 337 gr_def(ctx, 0x401814, 0x000000ff);
332 if (dev_priv->chipset == 0x50) { 338 if (dev_priv->chipset == 0x50) {
@@ -359,6 +365,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
359 case 0xac: 365 case 0xac:
360 gr_def(ctx, 0x401c00, 0x042500df); 366 gr_def(ctx, 0x401c00, 0x042500df);
361 break; 367 break;
368 case 0xa3:
362 case 0xa5: 369 case 0xa5:
363 case 0xa8: 370 case 0xa8:
364 gr_def(ctx, 0x401c00, 0x142500df); 371 gr_def(ctx, 0x401c00, 0x142500df);
@@ -413,6 +420,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
413 break; 420 break;
414 case 0x84: 421 case 0x84:
415 case 0xa0: 422 case 0xa0:
423 case 0xa3:
416 case 0xa5: 424 case 0xa5:
417 case 0xa8: 425 case 0xa8:
418 case 0xaa: 426 case 0xaa:
@@ -641,7 +649,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
641 if (dev_priv->chipset == 0x50) 649 if (dev_priv->chipset == 0x50)
642 cp_ctx(ctx, 0x4063e0, 0x1); 650 cp_ctx(ctx, 0x4063e0, 0x1);
643 651
644 /* 6800 */ 652 /* 6800: M2MF */
645 if (dev_priv->chipset < 0x90) { 653 if (dev_priv->chipset < 0x90) {
646 cp_ctx(ctx, 0x406814, 0x2b); 654 cp_ctx(ctx, 0x406814, 0x2b);
647 gr_def(ctx, 0x406818, 0x00000f80); 655 gr_def(ctx, 0x406818, 0x00000f80);
@@ -787,6 +795,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
787 case 0xa5: 795 case 0xa5:
788 gr_def(ctx, offset + 0x1c, 0x310c0000); 796 gr_def(ctx, offset + 0x1c, 0x310c0000);
789 break; 797 break;
798 case 0xa3:
790 case 0xa8: 799 case 0xa8:
791 case 0xaa: 800 case 0xaa:
792 case 0xac: 801 case 0xac:
@@ -854,6 +863,8 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
854 else 863 else
855 gr_def(ctx, offset + 0x8, 0x05010202); 864 gr_def(ctx, offset + 0x8, 0x05010202);
856 gr_def(ctx, offset + 0xc, 0x00030201); 865 gr_def(ctx, offset + 0xc, 0x00030201);
866 if (dev_priv->chipset == 0xa3)
867 cp_ctx(ctx, base + 0x36c, 1);
857 868
858 cp_ctx(ctx, base + 0x400, 2); 869 cp_ctx(ctx, base + 0x400, 2);
859 gr_def(ctx, base + 0x404, 0x00000040); 870 gr_def(ctx, base + 0x404, 0x00000040);
@@ -1154,7 +1165,9 @@ nv50_graph_construct_xfer1(struct nouveau_grctx *ctx)
1154 nv50_graph_construct_gene_unk8(ctx); 1165 nv50_graph_construct_gene_unk8(ctx);
1155 if (dev_priv->chipset == 0xa0) 1166 if (dev_priv->chipset == 0xa0)
1156 xf_emit(ctx, 0x189, 0); 1167 xf_emit(ctx, 0x189, 0);
1157 else if (dev_priv->chipset < 0xa8) 1168 else if (dev_priv->chipset == 0xa3)
1169 xf_emit(ctx, 0xd5, 0);
1170 else if (dev_priv->chipset == 0xa5)
1158 xf_emit(ctx, 0x99, 0); 1171 xf_emit(ctx, 0x99, 0);
1159 else if (dev_priv->chipset == 0xaa) 1172 else if (dev_priv->chipset == 0xaa)
1160 xf_emit(ctx, 0x65, 0); 1173 xf_emit(ctx, 0x65, 0);
@@ -1192,6 +1205,8 @@ nv50_graph_construct_xfer1(struct nouveau_grctx *ctx)
1192 ctx->ctxvals_pos = offset + 4; 1205 ctx->ctxvals_pos = offset + 4;
1193 if (dev_priv->chipset == 0xa0) 1206 if (dev_priv->chipset == 0xa0)
1194 xf_emit(ctx, 0xa80, 0); 1207 xf_emit(ctx, 0xa80, 0);
1208 else if (dev_priv->chipset == 0xa3)
1209 xf_emit(ctx, 0xa7c, 0);
1195 else 1210 else
1196 xf_emit(ctx, 0xa7a, 0); 1211 xf_emit(ctx, 0xa7a, 0);
1197 xf_emit(ctx, 1, 0x3fffff); 1212 xf_emit(ctx, 1, 0x3fffff);
@@ -1336,6 +1351,7 @@ nv50_graph_construct_gene_unk1(struct nouveau_grctx *ctx)
1336 xf_emit(ctx, 0x942, 0); 1351 xf_emit(ctx, 0x942, 0);
1337 break; 1352 break;
1338 case 0xa0: 1353 case 0xa0:
1354 case 0xa3:
1339 xf_emit(ctx, 0x2042, 0); 1355 xf_emit(ctx, 0x2042, 0);
1340 break; 1356 break;
1341 case 0xa5: 1357 case 0xa5:
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index de1f5b0062c5..5f21df31f3aa 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -63,9 +63,10 @@ nv50_instmem_init(struct drm_device *dev)
63 struct drm_nouveau_private *dev_priv = dev->dev_private; 63 struct drm_nouveau_private *dev_priv = dev->dev_private;
64 struct nouveau_channel *chan; 64 struct nouveau_channel *chan;
65 uint32_t c_offset, c_size, c_ramfc, c_vmpd, c_base, pt_size; 65 uint32_t c_offset, c_size, c_ramfc, c_vmpd, c_base, pt_size;
66 uint32_t save_nv001700;
67 uint64_t v;
66 struct nv50_instmem_priv *priv; 68 struct nv50_instmem_priv *priv;
67 int ret, i; 69 int ret, i;
68 uint32_t v, save_nv001700;
69 70
70 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 71 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
71 if (!priv) 72 if (!priv)
@@ -76,17 +77,12 @@ nv50_instmem_init(struct drm_device *dev)
76 for (i = 0x1700; i <= 0x1710; i += 4) 77 for (i = 0x1700; i <= 0x1710; i += 4)
77 priv->save1700[(i-0x1700)/4] = nv_rd32(dev, i); 78 priv->save1700[(i-0x1700)/4] = nv_rd32(dev, i);
78 79
79 if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac)
80 dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10) << 12;
81 else
82 dev_priv->vram_sys_base = 0;
83
84 /* Reserve the last MiB of VRAM, we should probably try to avoid 80 /* Reserve the last MiB of VRAM, we should probably try to avoid
85 * setting up the below tables over the top of the VBIOS image at 81 * setting up the below tables over the top of the VBIOS image at
86 * some point. 82 * some point.
87 */ 83 */
88 dev_priv->ramin_rsvd_vram = 1 << 20; 84 dev_priv->ramin_rsvd_vram = 1 << 20;
89 c_offset = nouveau_mem_fb_amount(dev) - dev_priv->ramin_rsvd_vram; 85 c_offset = dev_priv->vram_size - dev_priv->ramin_rsvd_vram;
90 c_size = 128 << 10; 86 c_size = 128 << 10;
91 c_vmpd = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x1400 : 0x200; 87 c_vmpd = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x1400 : 0x200;
92 c_ramfc = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x0 : 0x20; 88 c_ramfc = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x0 : 0x20;
@@ -106,7 +102,7 @@ nv50_instmem_init(struct drm_device *dev)
106 dev_priv->vm_gart_size = NV50_VM_BLOCK; 102 dev_priv->vm_gart_size = NV50_VM_BLOCK;
107 103
108 dev_priv->vm_vram_base = dev_priv->vm_gart_base + dev_priv->vm_gart_size; 104 dev_priv->vm_vram_base = dev_priv->vm_gart_base + dev_priv->vm_gart_size;
109 dev_priv->vm_vram_size = nouveau_mem_fb_amount(dev); 105 dev_priv->vm_vram_size = dev_priv->vram_size;
110 if (dev_priv->vm_vram_size > NV50_VM_MAX_VRAM) 106 if (dev_priv->vm_vram_size > NV50_VM_MAX_VRAM)
111 dev_priv->vm_vram_size = NV50_VM_MAX_VRAM; 107 dev_priv->vm_vram_size = NV50_VM_MAX_VRAM;
112 dev_priv->vm_vram_size = roundup(dev_priv->vm_vram_size, NV50_VM_BLOCK); 108 dev_priv->vm_vram_size = roundup(dev_priv->vm_vram_size, NV50_VM_BLOCK);
@@ -189,8 +185,8 @@ nv50_instmem_init(struct drm_device *dev)
189 185
190 i = 0; 186 i = 0;
191 while (v < dev_priv->vram_sys_base + c_offset + c_size) { 187 while (v < dev_priv->vram_sys_base + c_offset + c_size) {
192 BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v); 188 BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, lower_32_bits(v));
193 BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000); 189 BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, upper_32_bits(v));
194 v += 0x1000; 190 v += 0x1000;
195 i += 8; 191 i += 8;
196 } 192 }
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
index c2fff543b06f..0c68698f23df 100644
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -211,7 +211,7 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
211 mode_ctl = 0x0200; 211 mode_ctl = 0x0200;
212 break; 212 break;
213 case OUTPUT_DP: 213 case OUTPUT_DP:
214 mode_ctl |= 0x00050000; 214 mode_ctl |= (nv_encoder->dp.mc_unknown << 16);
215 if (nv_encoder->dcb->sorconf.link & 1) 215 if (nv_encoder->dcb->sorconf.link & 1)
216 mode_ctl |= 0x00000800; 216 mode_ctl |= 0x00000800;
217 else 217 else
@@ -274,6 +274,7 @@ static const struct drm_encoder_funcs nv50_sor_encoder_funcs = {
274int 274int
275nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry) 275nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry)
276{ 276{
277 struct drm_nouveau_private *dev_priv = dev->dev_private;
277 struct nouveau_encoder *nv_encoder = NULL; 278 struct nouveau_encoder *nv_encoder = NULL;
278 struct drm_encoder *encoder; 279 struct drm_encoder *encoder;
279 bool dum; 280 bool dum;
@@ -319,5 +320,27 @@ nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry)
319 encoder->possible_crtcs = entry->heads; 320 encoder->possible_crtcs = entry->heads;
320 encoder->possible_clones = 0; 321 encoder->possible_clones = 0;
321 322
323 if (nv_encoder->dcb->type == OUTPUT_DP) {
324 uint32_t mc, or = nv_encoder->or;
325
326 if (dev_priv->chipset < 0x90 ||
327 dev_priv->chipset == 0x92 || dev_priv->chipset == 0xa0)
328 mc = nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_C(or));
329 else
330 mc = nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_C(or));
331
332 switch ((mc & 0x00000f00) >> 8) {
333 case 8:
334 case 9:
335 nv_encoder->dp.mc_unknown = (mc & 0x000f0000) >> 16;
336 break;
337 default:
338 break;
339 }
340
341 if (!nv_encoder->dp.mc_unknown)
342 nv_encoder->dp.mc_unknown = 5;
343 }
344
322 return 0; 345 return 0;
323} 346}
diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
index 4c39a407aa4a..e671d0e74d4c 100644
--- a/drivers/gpu/drm/r128/r128_cce.c
+++ b/drivers/gpu/drm/r128/r128_cce.c
@@ -31,6 +31,7 @@
31 31
32#include <linux/firmware.h> 32#include <linux/firmware.h>
33#include <linux/platform_device.h> 33#include <linux/platform_device.h>
34#include <linux/slab.h>
34 35
35#include "drmP.h" 36#include "drmP.h"
36#include "drm.h" 37#include "drm.h"
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index ed38262d9985..3c91312dea9a 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -50,7 +50,7 @@ $(obj)/r600_cs.o: $(obj)/r600_reg_safe.h
50radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o \ 50radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o \
51 radeon_irq.o r300_cmdbuf.o r600_cp.o 51 radeon_irq.o r300_cmdbuf.o r600_cp.o
52# add KMS driver 52# add KMS driver
53radeon-y += radeon_device.o radeon_kms.o \ 53radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
54 radeon_atombios.o radeon_agp.o atombios_crtc.o radeon_combios.o \ 54 radeon_atombios.o radeon_agp.o atombios_crtc.o radeon_combios.o \
55 atom.o radeon_fence.o radeon_ttm.o radeon_object.o radeon_gart.o \ 55 atom.o radeon_fence.o radeon_ttm.o radeon_object.o radeon_gart.o \
56 radeon_legacy_crtc.o radeon_legacy_encoders.o radeon_connectors.o \ 56 radeon_legacy_crtc.o radeon_legacy_encoders.o radeon_connectors.o \
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index d75788feac6c..1d569830ed99 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -24,6 +24,7 @@
24 24
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/sched.h> 26#include <linux/sched.h>
27#include <linux/slab.h>
27#include <asm/unaligned.h> 28#include <asm/unaligned.h>
28 29
29#define ATOM_DEBUG 30#define ATOM_DEBUG
@@ -52,15 +53,17 @@
52 53
53typedef struct { 54typedef struct {
54 struct atom_context *ctx; 55 struct atom_context *ctx;
55
56 uint32_t *ps, *ws; 56 uint32_t *ps, *ws;
57 int ps_shift; 57 int ps_shift;
58 uint16_t start; 58 uint16_t start;
59 unsigned last_jump;
60 unsigned long last_jump_jiffies;
61 bool abort;
59} atom_exec_context; 62} atom_exec_context;
60 63
61int atom_debug = 0; 64int atom_debug = 0;
62static void atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params); 65static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params);
63void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params); 66int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params);
64 67
65static uint32_t atom_arg_mask[8] = 68static uint32_t atom_arg_mask[8] =
66 { 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000, 69 { 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000,
@@ -604,12 +607,17 @@ static void atom_op_beep(atom_exec_context *ctx, int *ptr, int arg)
604static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg) 607static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg)
605{ 608{
606 int idx = U8((*ptr)++); 609 int idx = U8((*ptr)++);
610 int r = 0;
611
607 if (idx < ATOM_TABLE_NAMES_CNT) 612 if (idx < ATOM_TABLE_NAMES_CNT)
608 SDEBUG(" table: %d (%s)\n", idx, atom_table_names[idx]); 613 SDEBUG(" table: %d (%s)\n", idx, atom_table_names[idx]);
609 else 614 else
610 SDEBUG(" table: %d\n", idx); 615 SDEBUG(" table: %d\n", idx);
611 if (U16(ctx->ctx->cmd_table + 4 + 2 * idx)) 616 if (U16(ctx->ctx->cmd_table + 4 + 2 * idx))
612 atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift); 617 r = atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
618 if (r) {
619 ctx->abort = true;
620 }
613} 621}
614 622
615static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg) 623static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg)
@@ -673,6 +681,8 @@ static void atom_op_eot(atom_exec_context *ctx, int *ptr, int arg)
673static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg) 681static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
674{ 682{
675 int execute = 0, target = U16(*ptr); 683 int execute = 0, target = U16(*ptr);
684 unsigned long cjiffies;
685
676 (*ptr) += 2; 686 (*ptr) += 2;
677 switch (arg) { 687 switch (arg) {
678 case ATOM_COND_ABOVE: 688 case ATOM_COND_ABOVE:
@@ -700,8 +710,25 @@ static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
700 if (arg != ATOM_COND_ALWAYS) 710 if (arg != ATOM_COND_ALWAYS)
701 SDEBUG(" taken: %s\n", execute ? "yes" : "no"); 711 SDEBUG(" taken: %s\n", execute ? "yes" : "no");
702 SDEBUG(" target: 0x%04X\n", target); 712 SDEBUG(" target: 0x%04X\n", target);
703 if (execute) 713 if (execute) {
714 if (ctx->last_jump == (ctx->start + target)) {
715 cjiffies = jiffies;
716 if (time_after(cjiffies, ctx->last_jump_jiffies)) {
717 cjiffies -= ctx->last_jump_jiffies;
718 if ((jiffies_to_msecs(cjiffies) > 1000)) {
 719 DRM_ERROR("atombios stuck in loop for more than 1 sec, aborting\n");
720 ctx->abort = true;
721 }
722 } else {
 723 /* jiffies wrapped around, just wait a little longer */
724 ctx->last_jump_jiffies = jiffies;
725 }
726 } else {
727 ctx->last_jump = ctx->start + target;
728 ctx->last_jump_jiffies = jiffies;
729 }
704 *ptr = ctx->start + target; 730 *ptr = ctx->start + target;
731 }
705} 732}
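
The jump handler above breaks out of AtomBIOS tables that keep taking the same backward jump for more than a second. The same watchdog idea, sketched with a plain 64-bit millisecond clock instead of jiffies (which is why the wrap-around special case from the patch is not needed here):

#include <stdbool.h>
#include <stdint.h>

struct jump_watchdog {
	unsigned int last_target;	/* ctx->start + target, as above */
	uint64_t last_ms;		/* when that target was first seen */
};

/* Returns true once the interpreter has been bouncing on the same jump
 * target for longer than the 1000 ms budget used in the patch. */
static bool jump_should_abort(struct jump_watchdog *w, unsigned int target,
			      uint64_t now_ms)
{
	if (w->last_target != target) {
		w->last_target = target;	/* new target: restart clock */
		w->last_ms = now_ms;
		return false;
	}
	return now_ms - w->last_ms > 1000;
}
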
706 733
707static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg) 734static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg)
@@ -881,11 +908,16 @@ static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
881 uint8_t attr = U8((*ptr)++), shift; 908 uint8_t attr = U8((*ptr)++), shift;
882 uint32_t saved, dst; 909 uint32_t saved, dst;
883 int dptr = *ptr; 910 int dptr = *ptr;
911 uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
884 SDEBUG(" dst: "); 912 SDEBUG(" dst: ");
885 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); 913 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
 914 /* op needs the full dst value */
915 dst = saved;
886 shift = atom_get_src(ctx, attr, ptr); 916 shift = atom_get_src(ctx, attr, ptr);
887 SDEBUG(" shift: %d\n", shift); 917 SDEBUG(" shift: %d\n", shift);
888 dst <<= shift; 918 dst <<= shift;
919 dst &= atom_arg_mask[dst_align];
920 dst >>= atom_arg_shift[dst_align];
889 SDEBUG(" dst: "); 921 SDEBUG(" dst: ");
890 atom_put_dst(ctx, arg, attr, &dptr, dst, saved); 922 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
891} 923}
@@ -895,11 +927,16 @@ static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg)
895 uint8_t attr = U8((*ptr)++), shift; 927 uint8_t attr = U8((*ptr)++), shift;
896 uint32_t saved, dst; 928 uint32_t saved, dst;
897 int dptr = *ptr; 929 int dptr = *ptr;
930 uint32_t dst_align = atom_dst_to_src[(attr >> 3) & 7][(attr >> 6) & 3];
898 SDEBUG(" dst: "); 931 SDEBUG(" dst: ");
899 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); 932 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
 933 /* op needs the full dst value */
934 dst = saved;
900 shift = atom_get_src(ctx, attr, ptr); 935 shift = atom_get_src(ctx, attr, ptr);
901 SDEBUG(" shift: %d\n", shift); 936 SDEBUG(" shift: %d\n", shift);
902 dst >>= shift; 937 dst >>= shift;
938 dst &= atom_arg_mask[dst_align];
939 dst >>= atom_arg_shift[dst_align];
903 SDEBUG(" dst: "); 940 SDEBUG(" dst: ");
904 atom_put_dst(ctx, arg, attr, &dptr, dst, saved); 941 atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
905} 942}
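
The shl/shr fix above stops shifting the already-extracted destination lane and instead shifts the full saved dword, then re-extracts the lane with the atom_arg_mask[]/atom_arg_shift[] pair. A worked example of the difference for a byte lane in bits 15:8; the register value and the mask/shift pair are chosen for illustration only:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t saved = 0x0001ab5c;	/* made-up register contents */
	uint32_t mask = 0x0000ff00, align = 8, shift = 4;

	/* Old behaviour: shift only the extracted lane; the shifted-out high
	 * bits are then dropped when the lane is merged back. */
	uint32_t old_lane = ((saved & mask) >> align) << shift;		/* 0xab0 */
	uint32_t old_reg  = (saved & ~mask) | ((old_lane << align) & mask);

	/* New behaviour: shift the whole dword, then re-extract the lane, so
	 * bits from below the field shift into it. */
	uint32_t new_lane = ((saved << shift) & mask) >> align;	/* 0xb5 */
	uint32_t new_reg  = (saved & ~mask) | ((new_lane << align) & mask);

	printf("old 0x%08x, new 0x%08x\n", old_reg, new_reg);
	return 0;	/* old 0x0001b05c, new 0x0001b55c */
}
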
@@ -1104,15 +1141,16 @@ static struct {
1104 atom_op_shr, ATOM_ARG_MC}, { 1141 atom_op_shr, ATOM_ARG_MC}, {
1105atom_op_debug, 0},}; 1142atom_op_debug, 0},};
1106 1143
1107static void atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params) 1144static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params)
1108{ 1145{
1109 int base = CU16(ctx->cmd_table + 4 + 2 * index); 1146 int base = CU16(ctx->cmd_table + 4 + 2 * index);
1110 int len, ws, ps, ptr; 1147 int len, ws, ps, ptr;
1111 unsigned char op; 1148 unsigned char op;
1112 atom_exec_context ectx; 1149 atom_exec_context ectx;
1150 int ret = 0;
1113 1151
1114 if (!base) 1152 if (!base)
1115 return; 1153 return -EINVAL;
1116 1154
1117 len = CU16(base + ATOM_CT_SIZE_PTR); 1155 len = CU16(base + ATOM_CT_SIZE_PTR);
1118 ws = CU8(base + ATOM_CT_WS_PTR); 1156 ws = CU8(base + ATOM_CT_WS_PTR);
@@ -1125,6 +1163,8 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint3
1125 ectx.ps_shift = ps / 4; 1163 ectx.ps_shift = ps / 4;
1126 ectx.start = base; 1164 ectx.start = base;
1127 ectx.ps = params; 1165 ectx.ps = params;
1166 ectx.abort = false;
1167 ectx.last_jump = 0;
1128 if (ws) 1168 if (ws)
1129 ectx.ws = kzalloc(4 * ws, GFP_KERNEL); 1169 ectx.ws = kzalloc(4 * ws, GFP_KERNEL);
1130 else 1170 else
@@ -1137,6 +1177,12 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint3
1137 SDEBUG("%s @ 0x%04X\n", atom_op_names[op], ptr - 1); 1177 SDEBUG("%s @ 0x%04X\n", atom_op_names[op], ptr - 1);
1138 else 1178 else
1139 SDEBUG("[%d] @ 0x%04X\n", op, ptr - 1); 1179 SDEBUG("[%d] @ 0x%04X\n", op, ptr - 1);
1180 if (ectx.abort) {
1181 DRM_ERROR("atombios stuck executing %04X (len %d, WS %d, PS %d) @ 0x%04X\n",
1182 base, len, ws, ps, ptr - 1);
1183 ret = -EINVAL;
1184 goto free;
1185 }
1140 1186
1141 if (op < ATOM_OP_CNT && op > 0) 1187 if (op < ATOM_OP_CNT && op > 0)
1142 opcode_table[op].func(&ectx, &ptr, 1188 opcode_table[op].func(&ectx, &ptr,
@@ -1150,12 +1196,16 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint3
1150 debug_depth--; 1196 debug_depth--;
1151 SDEBUG("<<\n"); 1197 SDEBUG("<<\n");
1152 1198
1199free:
1153 if (ws) 1200 if (ws)
1154 kfree(ectx.ws); 1201 kfree(ectx.ws);
1202 return ret;
1155} 1203}
1156 1204
1157void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params) 1205int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
1158{ 1206{
1207 int r;
1208
1159 mutex_lock(&ctx->mutex); 1209 mutex_lock(&ctx->mutex);
1160 /* reset reg block */ 1210 /* reset reg block */
1161 ctx->reg_block = 0; 1211 ctx->reg_block = 0;
@@ -1163,8 +1213,9 @@ void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
1163 ctx->fb_base = 0; 1213 ctx->fb_base = 0;
1164 /* reset io mode */ 1214 /* reset io mode */
1165 ctx->io_mode = ATOM_IO_MM; 1215 ctx->io_mode = ATOM_IO_MM;
1166 atom_execute_table_locked(ctx, index, params); 1216 r = atom_execute_table_locked(ctx, index, params);
1167 mutex_unlock(&ctx->mutex); 1217 mutex_unlock(&ctx->mutex);
1218 return r;
1168} 1219}
1169 1220
1170static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 }; 1221static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };
@@ -1248,9 +1299,7 @@ int atom_asic_init(struct atom_context *ctx)
1248 1299
1249 if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT)) 1300 if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT))
1250 return 1; 1301 return 1;
1251 atom_execute_table(ctx, ATOM_CMD_INIT, ps); 1302 return atom_execute_table(ctx, ATOM_CMD_INIT, ps);
1252
1253 return 0;
1254} 1303}
1255 1304
1256void atom_destroy(struct atom_context *ctx) 1305void atom_destroy(struct atom_context *ctx)
@@ -1260,12 +1309,16 @@ void atom_destroy(struct atom_context *ctx)
1260 kfree(ctx); 1309 kfree(ctx);
1261} 1310}
1262 1311
1263void atom_parse_data_header(struct atom_context *ctx, int index, 1312bool atom_parse_data_header(struct atom_context *ctx, int index,
1264 uint16_t * size, uint8_t * frev, uint8_t * crev, 1313 uint16_t * size, uint8_t * frev, uint8_t * crev,
1265 uint16_t * data_start) 1314 uint16_t * data_start)
1266{ 1315{
1267 int offset = index * 2 + 4; 1316 int offset = index * 2 + 4;
1268 int idx = CU16(ctx->data_table + offset); 1317 int idx = CU16(ctx->data_table + offset);
1318 u16 *mdt = (u16 *)(ctx->bios + ctx->data_table + 4);
1319
1320 if (!mdt[index])
1321 return false;
1269 1322
1270 if (size) 1323 if (size)
1271 *size = CU16(idx); 1324 *size = CU16(idx);
@@ -1274,38 +1327,42 @@ void atom_parse_data_header(struct atom_context *ctx, int index,
1274 if (crev) 1327 if (crev)
1275 *crev = CU8(idx + 3); 1328 *crev = CU8(idx + 3);
1276 *data_start = idx; 1329 *data_start = idx;
1277 return; 1330 return true;
1278} 1331}
1279 1332
1280void atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev, 1333bool atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev,
1281 uint8_t * crev) 1334 uint8_t * crev)
1282{ 1335{
1283 int offset = index * 2 + 4; 1336 int offset = index * 2 + 4;
1284 int idx = CU16(ctx->cmd_table + offset); 1337 int idx = CU16(ctx->cmd_table + offset);
1338 u16 *mct = (u16 *)(ctx->bios + ctx->cmd_table + 4);
1339
1340 if (!mct[index])
1341 return false;
1285 1342
1286 if (frev) 1343 if (frev)
1287 *frev = CU8(idx + 2); 1344 *frev = CU8(idx + 2);
1288 if (crev) 1345 if (crev)
1289 *crev = CU8(idx + 3); 1346 *crev = CU8(idx + 3);
1290 return; 1347 return true;
1291} 1348}
1292 1349
1293int atom_allocate_fb_scratch(struct atom_context *ctx) 1350int atom_allocate_fb_scratch(struct atom_context *ctx)
1294{ 1351{
1295 int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware); 1352 int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware);
1296 uint16_t data_offset; 1353 uint16_t data_offset;
1297 int usage_bytes; 1354 int usage_bytes = 0;
1298 struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage; 1355 struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage;
1299 1356
1300 atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset); 1357 if (atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
1358 firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);
1301 1359
1302 firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset); 1360 DRM_DEBUG("atom firmware requested %08x %dkb\n",
1361 firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware,
1362 firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb);
1303 1363
1304 DRM_DEBUG("atom firmware requested %08x %dkb\n", 1364 usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024;
1305 firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware, 1365 }
1306 firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb);
1307
1308 usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024;
1309 if (usage_bytes == 0) 1366 if (usage_bytes == 0)
1310 usage_bytes = 20 * 1024; 1367 usage_bytes = 20 * 1024;
1311 /* allocate some scratch memory */ 1368 /* allocate some scratch memory */
diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h
index bc73781423a1..cd1b64ab5ca7 100644
--- a/drivers/gpu/drm/radeon/atom.h
+++ b/drivers/gpu/drm/radeon/atom.h
@@ -140,11 +140,13 @@ struct atom_context {
140extern int atom_debug; 140extern int atom_debug;
141 141
142struct atom_context *atom_parse(struct card_info *, void *); 142struct atom_context *atom_parse(struct card_info *, void *);
143void atom_execute_table(struct atom_context *, int, uint32_t *); 143int atom_execute_table(struct atom_context *, int, uint32_t *);
144int atom_asic_init(struct atom_context *); 144int atom_asic_init(struct atom_context *);
145void atom_destroy(struct atom_context *); 145void atom_destroy(struct atom_context *);
146void atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size, uint8_t *frev, uint8_t *crev, uint16_t *data_start); 146bool atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size,
147void atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t *frev, uint8_t *crev); 147 uint8_t *frev, uint8_t *crev, uint16_t *data_start);
148bool atom_parse_cmd_header(struct atom_context *ctx, int index,
149 uint8_t *frev, uint8_t *crev);
148int atom_allocate_fb_scratch(struct atom_context *ctx); 150int atom_allocate_fb_scratch(struct atom_context *ctx);
149#include "atom-types.h" 151#include "atom-types.h"
150#include "atombios.h" 152#include "atombios.h"
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index 6732b5dd8ff4..27e2c715be11 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -2912,7 +2912,7 @@ typedef struct _ATOM_ANALOG_TV_INFO_V1_2
2912 UCHAR ucTV_BootUpDefaultStandard; 2912 UCHAR ucTV_BootUpDefaultStandard;
2913 UCHAR ucExt_TV_ASIC_ID; 2913 UCHAR ucExt_TV_ASIC_ID;
2914 UCHAR ucExt_TV_ASIC_SlaveAddr; 2914 UCHAR ucExt_TV_ASIC_SlaveAddr;
2915 ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_TV_TIMING]; 2915 ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_TV_TIMING_V1_2];
2916}ATOM_ANALOG_TV_INFO_V1_2; 2916}ATOM_ANALOG_TV_INFO_V1_2;
2917 2917
2918typedef struct _ATOM_DPCD_INFO 2918typedef struct _ATOM_DPCD_INFO
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index dd9fdf560611..a87990b3ae84 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -353,12 +353,55 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc,
353 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 353 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
354} 354}
355 355
356static void atombios_disable_ss(struct drm_crtc *crtc)
357{
358 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
359 struct drm_device *dev = crtc->dev;
360 struct radeon_device *rdev = dev->dev_private;
361 u32 ss_cntl;
362
363 if (ASIC_IS_DCE4(rdev)) {
364 switch (radeon_crtc->pll_id) {
365 case ATOM_PPLL1:
366 ss_cntl = RREG32(EVERGREEN_P1PLL_SS_CNTL);
367 ss_cntl &= ~EVERGREEN_PxPLL_SS_EN;
368 WREG32(EVERGREEN_P1PLL_SS_CNTL, ss_cntl);
369 break;
370 case ATOM_PPLL2:
371 ss_cntl = RREG32(EVERGREEN_P2PLL_SS_CNTL);
372 ss_cntl &= ~EVERGREEN_PxPLL_SS_EN;
373 WREG32(EVERGREEN_P2PLL_SS_CNTL, ss_cntl);
374 break;
375 case ATOM_DCPLL:
376 case ATOM_PPLL_INVALID:
377 return;
378 }
379 } else if (ASIC_IS_AVIVO(rdev)) {
380 switch (radeon_crtc->pll_id) {
381 case ATOM_PPLL1:
382 ss_cntl = RREG32(AVIVO_P1PLL_INT_SS_CNTL);
383 ss_cntl &= ~1;
384 WREG32(AVIVO_P1PLL_INT_SS_CNTL, ss_cntl);
385 break;
386 case ATOM_PPLL2:
387 ss_cntl = RREG32(AVIVO_P2PLL_INT_SS_CNTL);
388 ss_cntl &= ~1;
389 WREG32(AVIVO_P2PLL_INT_SS_CNTL, ss_cntl);
390 break;
391 case ATOM_DCPLL:
392 case ATOM_PPLL_INVALID:
393 return;
394 }
395 }
396}
397
398
356union atom_enable_ss { 399union atom_enable_ss {
357 ENABLE_LVDS_SS_PARAMETERS legacy; 400 ENABLE_LVDS_SS_PARAMETERS legacy;
358 ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION v1; 401 ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION v1;
359}; 402};
360 403
361static void atombios_set_ss(struct drm_crtc *crtc, int enable) 404static void atombios_enable_ss(struct drm_crtc *crtc)
362{ 405{
363 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 406 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
364 struct drm_device *dev = crtc->dev; 407 struct drm_device *dev = crtc->dev;
@@ -387,9 +430,9 @@ static void atombios_set_ss(struct drm_crtc *crtc, int enable)
387 step = dig->ss->step; 430 step = dig->ss->step;
388 delay = dig->ss->delay; 431 delay = dig->ss->delay;
389 range = dig->ss->range; 432 range = dig->ss->range;
390 } else if (enable) 433 } else
391 return; 434 return;
392 } else if (enable) 435 } else
393 return; 436 return;
394 break; 437 break;
395 } 438 }
@@ -406,13 +449,13 @@ static void atombios_set_ss(struct drm_crtc *crtc, int enable)
406 args.v1.ucSpreadSpectrumDelay = delay; 449 args.v1.ucSpreadSpectrumDelay = delay;
407 args.v1.ucSpreadSpectrumRange = range; 450 args.v1.ucSpreadSpectrumRange = range;
408 args.v1.ucPpll = radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1; 451 args.v1.ucPpll = radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
409 args.v1.ucEnable = enable; 452 args.v1.ucEnable = ATOM_ENABLE;
410 } else { 453 } else {
411 args.legacy.usSpreadSpectrumPercentage = cpu_to_le16(percentage); 454 args.legacy.usSpreadSpectrumPercentage = cpu_to_le16(percentage);
412 args.legacy.ucSpreadSpectrumType = type; 455 args.legacy.ucSpreadSpectrumType = type;
413 args.legacy.ucSpreadSpectrumStepSize_Delay = (step & 3) << 2; 456 args.legacy.ucSpreadSpectrumStepSize_Delay = (step & 3) << 2;
414 args.legacy.ucSpreadSpectrumStepSize_Delay |= (delay & 7) << 4; 457 args.legacy.ucSpreadSpectrumStepSize_Delay |= (delay & 7) << 4;
415 args.legacy.ucEnable = enable; 458 args.legacy.ucEnable = ATOM_ENABLE;
416 } 459 }
417 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 460 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
418} 461}
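The new atombios_disable_ss() helper above is a plain read-modify-write of the per-PLL spread-spectrum control register: read the register for the CRTC's PLL, clear only the enable bit, and write it back so the rest of the SS configuration survives. A minimal standalone sketch of that pattern follows; the offsets, the regs[] backing store and the rreg32()/wreg32() helpers are stand-ins for MMIO access, not the driver's real definitions.

/*
 * Sketch of the read-modify-write used to turn spread spectrum off for a
 * given PLL.  Offsets and the enable bit are hypothetical placeholders.
 */
#include <stdint.h>
#include <stdio.h>

#define P1PLL_SS_CNTL   0x0420          /* hypothetical register offsets */
#define P2PLL_SS_CNTL   0x0430
#define PLL_SS_EN       (1u << 0)       /* spread-spectrum enable bit */

static uint32_t regs[0x1000];           /* fake register file */

static uint32_t rreg32(uint32_t off)            { return regs[off]; }
static void     wreg32(uint32_t off, uint32_t v) { regs[off] = v; }

static void disable_ss(int pll_id)
{
	uint32_t off = (pll_id == 0) ? P1PLL_SS_CNTL : P2PLL_SS_CNTL;
	uint32_t v = rreg32(off);

	v &= ~PLL_SS_EN;                /* clear enable, keep other bits */
	wreg32(off, v);
}

int main(void)
{
	regs[P1PLL_SS_CNTL] = PLL_SS_EN | 0x80;  /* SS on plus some config bit */
	disable_ss(0);
	printf("P1PLL_SS_CNTL = 0x%02x\n", rreg32(P1PLL_SS_CNTL)); /* 0x80 */
	return 0;
}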
@@ -478,10 +521,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
478 /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */ 521 /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
479 if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1) 522 if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
480 adjusted_clock = mode->clock * 2; 523 adjusted_clock = mode->clock * 2;
481 /* LVDS PLL quirks */ 524 if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) {
482 if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) { 525 pll->algo = PLL_ALGO_LEGACY;
483 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 526 pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER;
484 pll->algo = dig->pll_algo;
485 } 527 }
486 } else { 528 } else {
487 if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) 529 if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
@@ -503,8 +545,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
503 int index; 545 int index;
504 546
505 index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll); 547 index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll);
506 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, 548 if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
507 &crev); 549 &crev))
550 return adjusted_clock;
508 551
509 memset(&args, 0, sizeof(args)); 552 memset(&args, 0, sizeof(args));
510 553
@@ -542,11 +585,16 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
542 } 585 }
543 } else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { 586 } else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
544 /* may want to enable SS on DP/eDP eventually */ 587 /* may want to enable SS on DP/eDP eventually */
545 args.v3.sInput.ucDispPllConfig |= 588 /*args.v3.sInput.ucDispPllConfig |=
546 DISPPLL_CONFIG_SS_ENABLE; 589 DISPPLL_CONFIG_SS_ENABLE;*/
547 if (mode->clock > 165000) 590 if (encoder_mode == ATOM_ENCODER_MODE_DP)
548 args.v3.sInput.ucDispPllConfig |= 591 args.v3.sInput.ucDispPllConfig |=
549 DISPPLL_CONFIG_DUAL_LINK; 592 DISPPLL_CONFIG_COHERENT_MODE;
593 else {
594 if (mode->clock > 165000)
595 args.v3.sInput.ucDispPllConfig |=
596 DISPPLL_CONFIG_DUAL_LINK;
597 }
550 } 598 }
551 atom_execute_table(rdev->mode_info.atom_context, 599 atom_execute_table(rdev->mode_info.atom_context,
552 index, (uint32_t *)&args); 600 index, (uint32_t *)&args);
@@ -592,8 +640,9 @@ static void atombios_crtc_set_dcpll(struct drm_crtc *crtc)
592 memset(&args, 0, sizeof(args)); 640 memset(&args, 0, sizeof(args));
593 641
594 index = GetIndexIntoMasterTable(COMMAND, SetPixelClock); 642 index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
595 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, 643 if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
596 &crev); 644 &crev))
645 return;
597 646
598 switch (frev) { 647 switch (frev) {
599 case 1: 648 case 1:
@@ -667,8 +716,9 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
667 &ref_div, &post_div); 716 &ref_div, &post_div);
668 717
669 index = GetIndexIntoMasterTable(COMMAND, SetPixelClock); 718 index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
670 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, 719 if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
671 &crev); 720 &crev))
721 return;
672 722
673 switch (frev) { 723 switch (frev) {
674 case 1: 724 case 1:
@@ -1083,15 +1133,12 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
1083 1133
1084 /* TODO color tiling */ 1134 /* TODO color tiling */
1085 1135
1086 /* pick pll */ 1136 atombios_disable_ss(crtc);
1087 radeon_crtc->pll_id = radeon_atom_pick_pll(crtc);
1088
1089 atombios_set_ss(crtc, 0);
1090 /* always set DCPLL */ 1137 /* always set DCPLL */
1091 if (ASIC_IS_DCE4(rdev)) 1138 if (ASIC_IS_DCE4(rdev))
1092 atombios_crtc_set_dcpll(crtc); 1139 atombios_crtc_set_dcpll(crtc);
1093 atombios_crtc_set_pll(crtc, adjusted_mode); 1140 atombios_crtc_set_pll(crtc, adjusted_mode);
1094 atombios_set_ss(crtc, 1); 1141 atombios_enable_ss(crtc);
1095 1142
1096 if (ASIC_IS_DCE4(rdev)) 1143 if (ASIC_IS_DCE4(rdev))
1097 atombios_set_crtc_dtd_timing(crtc, adjusted_mode); 1144 atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
@@ -1120,6 +1167,11 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
1120 1167
1121static void atombios_crtc_prepare(struct drm_crtc *crtc) 1168static void atombios_crtc_prepare(struct drm_crtc *crtc)
1122{ 1169{
1170 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
1171
1172 /* pick pll */
1173 radeon_crtc->pll_id = radeon_atom_pick_pll(crtc);
1174
1123 atombios_lock_crtc(crtc, ATOM_ENABLE); 1175 atombios_lock_crtc(crtc, ATOM_ENABLE);
1124 atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); 1176 atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
1125} 1177}
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 8a133bda00a2..28b31c64f48d 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -745,14 +745,14 @@ void dp_link_train(struct drm_encoder *encoder,
745 >> DP_TRAIN_PRE_EMPHASIS_SHIFT); 745 >> DP_TRAIN_PRE_EMPHASIS_SHIFT);
746 746
747 /* disable the training pattern on the sink */ 747 /* disable the training pattern on the sink */
748 dp_set_training(radeon_connector, DP_TRAINING_PATTERN_DISABLE);
749
750 /* disable the training pattern on the source */
748 if (ASIC_IS_DCE4(rdev)) 751 if (ASIC_IS_DCE4(rdev))
749 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE); 752 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE);
750 else 753 else
751 radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE, 754 radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE,
752 dig_connector->dp_clock, enc_id, 0); 755 dig_connector->dp_clock, enc_id, 0);
753
754 radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE,
755 dig_connector->dp_clock, enc_id, 0);
756} 756}
757 757
758int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, 758int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
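The dp_link_train() change above reorders the end of training: the sink is told to drop the training pattern before the source encoder is switched back to normal operation, and the duplicated "training complete" call is removed. A small ordering sketch under those assumptions; the functions below are stubs standing in for the driver's DPCD write and encoder-setup helpers.

/*
 * End-of-link-training order: sink first, then the source, exactly once.
 */
#include <stdio.h>
#include <stdbool.h>

static void sink_set_training_pattern_disable(void)
{
	printf("DPCD: training pattern disabled on sink\n");
}

static void source_training_complete(bool dce4)
{
	if (dce4)
		printf("encoder: DP_LINK_TRAINING_COMPLETE\n");
	else
		printf("encoder service: DP_ACTION_TRAINING_COMPLETE\n");
}

static void finish_link_training(bool dce4)
{
	sink_set_training_pattern_disable();    /* sink stops the pattern first */
	source_training_complete(dce4);         /* then the source, only once */
}

int main(void)
{
	finish_link_training(false);
	return 0;
}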
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index bd2e7aa85c1d..e8f447e20507 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -23,8 +23,10 @@
23 */ 23 */
24#include <linux/firmware.h> 24#include <linux/firmware.h>
25#include <linux/platform_device.h> 25#include <linux/platform_device.h>
26#include <linux/slab.h>
26#include "drmP.h" 27#include "drmP.h"
27#include "radeon.h" 28#include "radeon.h"
29#include "radeon_asic.h"
28#include "radeon_drm.h" 30#include "radeon_drm.h"
29#include "rv770d.h" 31#include "rv770d.h"
30#include "atom.h" 32#include "atom.h"
@@ -436,7 +438,6 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
436 438
437int evergreen_mc_init(struct radeon_device *rdev) 439int evergreen_mc_init(struct radeon_device *rdev)
438{ 440{
439 fixed20_12 a;
440 u32 tmp; 441 u32 tmp;
441 int chansize, numchan; 442 int chansize, numchan;
442 443
@@ -481,12 +482,8 @@ int evergreen_mc_init(struct radeon_device *rdev)
481 rdev->mc.real_vram_size = rdev->mc.aper_size; 482 rdev->mc.real_vram_size = rdev->mc.aper_size;
482 } 483 }
483 r600_vram_gtt_location(rdev, &rdev->mc); 484 r600_vram_gtt_location(rdev, &rdev->mc);
484 /* FIXME: we should enforce default clock in case GPU is not in 485 radeon_update_bandwidth_info(rdev);
485 * default setup 486
486 */
487 a.full = rfixed_const(100);
488 rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
489 rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
490 return 0; 487 return 0;
491} 488}
492 489
@@ -746,6 +743,7 @@ int evergreen_init(struct radeon_device *rdev)
746 743
747void evergreen_fini(struct radeon_device *rdev) 744void evergreen_fini(struct radeon_device *rdev)
748{ 745{
746 radeon_pm_fini(rdev);
749 evergreen_suspend(rdev); 747 evergreen_suspend(rdev);
750#if 0 748#if 0
751 r600_blit_fini(rdev); 749 r600_blit_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 91eb762eb3f9..cf60c0b3ef15 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -26,11 +26,13 @@
26 * Jerome Glisse 26 * Jerome Glisse
27 */ 27 */
28#include <linux/seq_file.h> 28#include <linux/seq_file.h>
29#include <linux/slab.h>
29#include "drmP.h" 30#include "drmP.h"
30#include "drm.h" 31#include "drm.h"
31#include "radeon_drm.h" 32#include "radeon_drm.h"
32#include "radeon_reg.h" 33#include "radeon_reg.h"
33#include "radeon.h" 34#include "radeon.h"
35#include "radeon_asic.h"
34#include "r100d.h" 36#include "r100d.h"
35#include "rs100d.h" 37#include "rs100d.h"
36#include "rv200d.h" 38#include "rv200d.h"
@@ -235,9 +237,9 @@ int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
235 237
236void r100_pci_gart_fini(struct radeon_device *rdev) 238void r100_pci_gart_fini(struct radeon_device *rdev)
237{ 239{
240 radeon_gart_fini(rdev);
238 r100_pci_gart_disable(rdev); 241 r100_pci_gart_disable(rdev);
239 radeon_gart_table_ram_free(rdev); 242 radeon_gart_table_ram_free(rdev);
240 radeon_gart_fini(rdev);
241} 243}
242 244
243int r100_irq_set(struct radeon_device *rdev) 245int r100_irq_set(struct radeon_device *rdev)
@@ -312,10 +314,12 @@ int r100_irq_process(struct radeon_device *rdev)
312 /* Vertical blank interrupts */ 314 /* Vertical blank interrupts */
313 if (status & RADEON_CRTC_VBLANK_STAT) { 315 if (status & RADEON_CRTC_VBLANK_STAT) {
314 drm_handle_vblank(rdev->ddev, 0); 316 drm_handle_vblank(rdev->ddev, 0);
317 rdev->pm.vblank_sync = true;
315 wake_up(&rdev->irq.vblank_queue); 318 wake_up(&rdev->irq.vblank_queue);
316 } 319 }
317 if (status & RADEON_CRTC2_VBLANK_STAT) { 320 if (status & RADEON_CRTC2_VBLANK_STAT) {
318 drm_handle_vblank(rdev->ddev, 1); 321 drm_handle_vblank(rdev->ddev, 1);
322 rdev->pm.vblank_sync = true;
319 wake_up(&rdev->irq.vblank_queue); 323 wake_up(&rdev->irq.vblank_queue);
320 } 324 }
321 if (status & RADEON_FP_DETECT_STAT) { 325 if (status & RADEON_FP_DETECT_STAT) {
@@ -741,6 +745,8 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
741 udelay(10); 745 udelay(10);
742 rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR); 746 rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
743 rdev->cp.wptr = RREG32(RADEON_CP_RB_WPTR); 747 rdev->cp.wptr = RREG32(RADEON_CP_RB_WPTR);
748 /* protect against crazy HW on resume */
749 rdev->cp.wptr &= rdev->cp.ptr_mask;
744 /* Set cp mode to bus mastering & enable cp*/ 750 /* Set cp mode to bus mastering & enable cp*/
745 WREG32(RADEON_CP_CSQ_MODE, 751 WREG32(RADEON_CP_CSQ_MODE,
746 REG_SET(RADEON_INDIRECT2_START, indirect2_start) | 752 REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
@@ -1804,6 +1810,7 @@ void r100_set_common_regs(struct radeon_device *rdev)
1804{ 1810{
1805 struct drm_device *dev = rdev->ddev; 1811 struct drm_device *dev = rdev->ddev;
1806 bool force_dac2 = false; 1812 bool force_dac2 = false;
1813 u32 tmp;
1807 1814
1808 /* set these so they don't interfere with anything */ 1815 /* set these so they don't interfere with anything */
1809 WREG32(RADEON_OV0_SCALE_CNTL, 0); 1816 WREG32(RADEON_OV0_SCALE_CNTL, 0);
@@ -1875,6 +1882,12 @@ void r100_set_common_regs(struct radeon_device *rdev)
1875 WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug); 1882 WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
1876 WREG32(RADEON_DAC_CNTL2, dac2_cntl); 1883 WREG32(RADEON_DAC_CNTL2, dac2_cntl);
1877 } 1884 }
1885
1886 /* switch PM block to ACPI mode */
1887 tmp = RREG32_PLL(RADEON_PLL_PWRMGT_CNTL);
1888 tmp &= ~RADEON_PM_MODE_SEL;
1889 WREG32_PLL(RADEON_PLL_PWRMGT_CNTL, tmp);
1890
1878} 1891}
1879 1892
1880/* 1893/*
@@ -2022,6 +2035,7 @@ void r100_mc_init(struct radeon_device *rdev)
2022 radeon_vram_location(rdev, &rdev->mc, base); 2035 radeon_vram_location(rdev, &rdev->mc, base);
2023 if (!(rdev->flags & RADEON_IS_AGP)) 2036 if (!(rdev->flags & RADEON_IS_AGP))
2024 radeon_gtt_location(rdev, &rdev->mc); 2037 radeon_gtt_location(rdev, &rdev->mc);
2038 radeon_update_bandwidth_info(rdev);
2025} 2039}
2026 2040
2027 2041
@@ -2385,6 +2399,8 @@ void r100_bandwidth_update(struct radeon_device *rdev)
2385 uint32_t pixel_bytes1 = 0; 2399 uint32_t pixel_bytes1 = 0;
2386 uint32_t pixel_bytes2 = 0; 2400 uint32_t pixel_bytes2 = 0;
2387 2401
2402 radeon_update_display_priority(rdev);
2403
2388 if (rdev->mode_info.crtcs[0]->base.enabled) { 2404 if (rdev->mode_info.crtcs[0]->base.enabled) {
2389 mode1 = &rdev->mode_info.crtcs[0]->base.mode; 2405 mode1 = &rdev->mode_info.crtcs[0]->base.mode;
2390 pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8; 2406 pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8;
@@ -2413,11 +2429,8 @@ void r100_bandwidth_update(struct radeon_device *rdev)
2413 /* 2429 /*
2414 * determine if there is enough bw for current mode 2430

2415 */ 2431 */
2416 mclk_ff.full = rfixed_const(rdev->clock.default_mclk); 2432 sclk_ff = rdev->pm.sclk;
2417 temp_ff.full = rfixed_const(100); 2433 mclk_ff = rdev->pm.mclk;
2418 mclk_ff.full = rfixed_div(mclk_ff, temp_ff);
2419 sclk_ff.full = rfixed_const(rdev->clock.default_sclk);
2420 sclk_ff.full = rfixed_div(sclk_ff, temp_ff);
2421 2434
2422 temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1); 2435 temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
2423 temp_ff.full = rfixed_const(temp); 2436 temp_ff.full = rfixed_const(temp);
@@ -2878,7 +2891,7 @@ static int r100_cs_track_texture_check(struct radeon_device *rdev,
2878{ 2891{
2879 struct radeon_bo *robj; 2892 struct radeon_bo *robj;
2880 unsigned long size; 2893 unsigned long size;
2881 unsigned u, i, w, h; 2894 unsigned u, i, w, h, d;
2882 int ret; 2895 int ret;
2883 2896
2884 for (u = 0; u < track->num_texture; u++) { 2897 for (u = 0; u < track->num_texture; u++) {
@@ -2910,20 +2923,25 @@ static int r100_cs_track_texture_check(struct radeon_device *rdev,
2910 h = h / (1 << i); 2923 h = h / (1 << i);
2911 if (track->textures[u].roundup_h) 2924 if (track->textures[u].roundup_h)
2912 h = roundup_pow_of_two(h); 2925 h = roundup_pow_of_two(h);
2926 if (track->textures[u].tex_coord_type == 1) {
2927 d = (1 << track->textures[u].txdepth) / (1 << i);
2928 if (!d)
2929 d = 1;
2930 } else {
2931 d = 1;
2932 }
2913 if (track->textures[u].compress_format) { 2933 if (track->textures[u].compress_format) {
2914 2934
2915 size += r100_track_compress_size(track->textures[u].compress_format, w, h); 2935 size += r100_track_compress_size(track->textures[u].compress_format, w, h) * d;
2916 /* compressed textures are block based */ 2936 /* compressed textures are block based */
2917 } else 2937 } else
2918 size += w * h; 2938 size += w * h * d;
2919 } 2939 }
2920 size *= track->textures[u].cpp; 2940 size *= track->textures[u].cpp;
2921 2941
2922 switch (track->textures[u].tex_coord_type) { 2942 switch (track->textures[u].tex_coord_type) {
2923 case 0: 2943 case 0:
2924 break;
2925 case 1: 2944 case 1:
2926 size *= (1 << track->textures[u].txdepth);
2927 break; 2945 break;
2928 case 2: 2946 case 2:
2929 if (track->separate_cube) { 2947 if (track->separate_cube) {
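The texture bounds check above now accounts for 3D textures: each mip level halves width, height and, when the coordinate type is volumetric, depth, and the per-level footprints are summed before multiplying by bytes per texel. A self-contained sketch of that accumulation, with the round-up and compressed-format paths left out for brevity:

/*
 * Mip-chain footprint: sum w*h*d per level, clamp each dimension to 1,
 * then scale by bytes per texel.  Values in main() are illustrative.
 */
#include <stdio.h>

static unsigned long texture_size(unsigned w, unsigned h, unsigned depth_log2,
				  int is_3d, unsigned cpp, unsigned num_levels)
{
	unsigned long size = 0;
	unsigned i;

	for (i = 0; i < num_levels; i++) {
		unsigned lw = (w >> i) ? (w >> i) : 1;
		unsigned lh = (h >> i) ? (h >> i) : 1;
		unsigned ld = 1;

		if (is_3d) {
			ld = (1u << depth_log2) >> i;
			if (!ld)
				ld = 1;
		}
		size += (unsigned long)lw * lh * ld;
	}
	return size * cpp;
}

int main(void)
{
	/* 256x256x16 3D texture, 4 bytes/texel, 9 mip levels */
	printf("%lu bytes\n", texture_size(256, 256, 4, 1, 4, 9));
	return 0;
}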
@@ -2957,7 +2975,7 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
2957 2975
2958 for (i = 0; i < track->num_cb; i++) { 2976 for (i = 0; i < track->num_cb; i++) {
2959 if (track->cb[i].robj == NULL) { 2977 if (track->cb[i].robj == NULL) {
2960 if (!(track->fastfill || track->color_channel_mask || 2978 if (!(track->zb_cb_clear || track->color_channel_mask ||
2961 track->blend_read_enable)) { 2979 track->blend_read_enable)) {
2962 continue; 2980 continue;
2963 } 2981 }
@@ -2994,7 +3012,11 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
2994 } 3012 }
2995 } 3013 }
2996 prim_walk = (track->vap_vf_cntl >> 4) & 0x3; 3014 prim_walk = (track->vap_vf_cntl >> 4) & 0x3;
2997 nverts = (track->vap_vf_cntl >> 16) & 0xFFFF; 3015 if (track->vap_vf_cntl & (1 << 14)) {
3016 nverts = track->vap_alt_nverts;
3017 } else {
3018 nverts = (track->vap_vf_cntl >> 16) & 0xFFFF;
3019 }
2998 switch (prim_walk) { 3020 switch (prim_walk) {
2999 case 1: 3021 case 1:
3000 for (i = 0; i < track->num_arrays; i++) { 3022 for (i = 0; i < track->num_arrays; i++) {
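The vertex-count fix above selects between two sources: when bit 14 of VAP_VF_CNTL is set the count comes from the separately tracked VAP_ALT_NUM_VERTICES register, otherwise from bits 16..31 of VAP_VF_CNTL itself. A tiny illustration of that field selection; the field positions follow the driver code, the sample values are made up.

/*
 * Choose the vertex count source based on bit 14 of VAP_VF_CNTL.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t num_vertices(uint32_t vap_vf_cntl, uint32_t vap_alt_nverts)
{
	if (vap_vf_cntl & (1u << 14))
		return vap_alt_nverts & 0xFFFFFF;   /* 24-bit alternate count */
	return (vap_vf_cntl >> 16) & 0xFFFF;        /* legacy 16-bit field */
}

int main(void)
{
	printf("%u\n", (unsigned)num_vertices(0x00050000, 0));     /* 5 */
	printf("%u\n", (unsigned)num_vertices(1u << 14, 123456));  /* 123456 */
	return 0;
}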
@@ -3440,6 +3462,7 @@ int r100_suspend(struct radeon_device *rdev)
3440 3462
3441void r100_fini(struct radeon_device *rdev) 3463void r100_fini(struct radeon_device *rdev)
3442{ 3464{
3465 radeon_pm_fini(rdev);
3443 r100_cp_fini(rdev); 3466 r100_cp_fini(rdev);
3444 r100_wb_fini(rdev); 3467 r100_wb_fini(rdev);
3445 r100_ib_fini(rdev); 3468 r100_ib_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h
index b27a6999d219..f47cdca1c004 100644
--- a/drivers/gpu/drm/radeon/r100_track.h
+++ b/drivers/gpu/drm/radeon/r100_track.h
@@ -64,6 +64,7 @@ struct r100_cs_track {
64 unsigned maxy; 64 unsigned maxy;
65 unsigned vtx_size; 65 unsigned vtx_size;
66 unsigned vap_vf_cntl; 66 unsigned vap_vf_cntl;
67 unsigned vap_alt_nverts;
67 unsigned immd_dwords; 68 unsigned immd_dwords;
68 unsigned num_arrays; 69 unsigned num_arrays;
69 unsigned max_indx; 70 unsigned max_indx;
@@ -74,7 +75,7 @@ struct r100_cs_track {
74 struct r100_cs_track_texture textures[R300_TRACK_MAX_TEXTURE]; 75 struct r100_cs_track_texture textures[R300_TRACK_MAX_TEXTURE];
75 bool z_enabled; 76 bool z_enabled;
76 bool separate_cube; 77 bool separate_cube;
77 bool fastfill; 78 bool zb_cb_clear;
78 bool blend_read_enable; 79 bool blend_read_enable;
79}; 80};
80 81
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index 1146c9909c2c..85617c311212 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -30,6 +30,7 @@
30#include "radeon_drm.h" 30#include "radeon_drm.h"
31#include "radeon_reg.h" 31#include "radeon_reg.h"
32#include "radeon.h" 32#include "radeon.h"
33#include "radeon_asic.h"
33 34
34#include "r100d.h" 35#include "r100d.h"
35#include "r200_reg_safe.h" 36#include "r200_reg_safe.h"
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 4cef90cd74e5..a5ff8076b423 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -26,10 +26,12 @@
26 * Jerome Glisse 26 * Jerome Glisse
27 */ 27 */
28#include <linux/seq_file.h> 28#include <linux/seq_file.h>
29#include <linux/slab.h>
29#include "drmP.h" 30#include "drmP.h"
30#include "drm.h" 31#include "drm.h"
31#include "radeon_reg.h" 32#include "radeon_reg.h"
32#include "radeon.h" 33#include "radeon.h"
34#include "radeon_asic.h"
33#include "radeon_drm.h" 35#include "radeon_drm.h"
34#include "r100_track.h" 36#include "r100_track.h"
35#include "r300d.h" 37#include "r300d.h"
@@ -164,9 +166,9 @@ void rv370_pcie_gart_disable(struct radeon_device *rdev)
164 166
165void rv370_pcie_gart_fini(struct radeon_device *rdev) 167void rv370_pcie_gart_fini(struct radeon_device *rdev)
166{ 168{
169 radeon_gart_fini(rdev);
167 rv370_pcie_gart_disable(rdev); 170 rv370_pcie_gart_disable(rdev);
168 radeon_gart_table_vram_free(rdev); 171 radeon_gart_table_vram_free(rdev);
169 radeon_gart_fini(rdev);
170} 172}
171 173
172void r300_fence_ring_emit(struct radeon_device *rdev, 174void r300_fence_ring_emit(struct radeon_device *rdev,
@@ -322,12 +324,12 @@ void r300_gpu_init(struct radeon_device *rdev)
322 uint32_t gb_tile_config, tmp; 324 uint32_t gb_tile_config, tmp;
323 325
324 r100_hdp_reset(rdev); 326 r100_hdp_reset(rdev);
325 /* FIXME: rv380 one pipes ? */ 327 if ((rdev->family == CHIP_R300 && rdev->pdev->device != 0x4144) ||
326 if ((rdev->family == CHIP_R300) || (rdev->family == CHIP_R350)) { 328 (rdev->family == CHIP_R350 && rdev->pdev->device != 0x4148)) {
327 /* r300,r350 */ 329 /* r300,r350 */
328 rdev->num_gb_pipes = 2; 330 rdev->num_gb_pipes = 2;
329 } else { 331 } else {
330 /* rv350,rv370,rv380 */ 332 /* rv350,rv370,rv380,r300 AD, r350 AH */
331 rdev->num_gb_pipes = 1; 333 rdev->num_gb_pipes = 1;
332 } 334 }
333 rdev->num_z_pipes = 1; 335 rdev->num_z_pipes = 1;
@@ -481,6 +483,7 @@ void r300_mc_init(struct radeon_device *rdev)
481 radeon_vram_location(rdev, &rdev->mc, base); 483 radeon_vram_location(rdev, &rdev->mc, base);
482 if (!(rdev->flags & RADEON_IS_AGP)) 484 if (!(rdev->flags & RADEON_IS_AGP))
483 radeon_gtt_location(rdev, &rdev->mc); 485 radeon_gtt_location(rdev, &rdev->mc);
486 radeon_update_bandwidth_info(rdev);
484} 487}
485 488
486void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes) 489void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
@@ -726,6 +729,12 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
726 /* VAP_VF_MAX_VTX_INDX */ 729 /* VAP_VF_MAX_VTX_INDX */
727 track->max_indx = idx_value & 0x00FFFFFFUL; 730 track->max_indx = idx_value & 0x00FFFFFFUL;
728 break; 731 break;
732 case 0x2088:
733 /* VAP_ALT_NUM_VERTICES - only valid on r500 */
734 if (p->rdev->family < CHIP_RV515)
735 goto fail;
736 track->vap_alt_nverts = idx_value & 0xFFFFFF;
737 break;
729 case 0x43E4: 738 case 0x43E4:
730 /* SC_SCISSOR1 */ 739 /* SC_SCISSOR1 */
731 track->maxy = ((idx_value >> 13) & 0x1FFF) + 1; 740 track->maxy = ((idx_value >> 13) & 0x1FFF) + 1;
@@ -763,7 +772,6 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
763 tmp = idx_value & ~(0x7 << 16); 772 tmp = idx_value & ~(0x7 << 16);
764 tmp |= tile_flags; 773 tmp |= tile_flags;
765 ib[idx] = tmp; 774 ib[idx] = tmp;
766
767 i = (reg - 0x4E38) >> 2; 775 i = (reg - 0x4E38) >> 2;
768 track->cb[i].pitch = idx_value & 0x3FFE; 776 track->cb[i].pitch = idx_value & 0x3FFE;
769 switch (((idx_value >> 21) & 0xF)) { 777 switch (((idx_value >> 21) & 0xF)) {
@@ -1036,7 +1044,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
1036 break; 1044 break;
1037 case 0x4d1c: 1045 case 0x4d1c:
1038 /* ZB_BW_CNTL */ 1046 /* ZB_BW_CNTL */
1039 track->fastfill = !!(idx_value & (1 << 2)); 1047 track->zb_cb_clear = !!(idx_value & (1 << 5));
1040 break; 1048 break;
1041 case 0x4e04: 1049 case 0x4e04:
1042 /* RB3D_BLENDCNTL */ 1050 /* RB3D_BLENDCNTL */
@@ -1048,11 +1056,13 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
1048 break; 1056 break;
1049 /* fallthrough do not move */ 1057 /* fallthrough do not move */
1050 default: 1058 default:
1051 printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", 1059 goto fail;
1052 reg, idx);
1053 return -EINVAL;
1054 } 1060 }
1055 return 0; 1061 return 0;
1062fail:
1063 printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
1064 reg, idx);
1065 return -EINVAL;
1056} 1066}
1057 1067
1058static int r300_packet3_check(struct radeon_cs_parser *p, 1068static int r300_packet3_check(struct radeon_cs_parser *p,
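The r300_packet0_check() rework above consolidates the error path: the default case now jumps to a single fail label, so every rejected register is reported the same way instead of duplicating the printk/return pair. A standalone sketch of that goto-fail pattern under the same idea; the register numbers and message text are illustrative.

/*
 * One fail label for all forbidden-register reports.
 */
#include <stdio.h>
#include <errno.h>

static int check_reg(unsigned reg, unsigned idx)
{
	switch (reg) {
	case 0x2088:            /* registers the checker knows how to track */
	case 0x43E4:
		/* register-specific state tracking would go here */
		break;
	default:
		goto fail;
	}
	return 0;
fail:
	fprintf(stderr, "Forbidden register 0x%04X in cs at %u\n", reg, idx);
	return -EINVAL;
}

int main(void)
{
	printf("0x43E4 -> %d\n", check_reg(0x43E4, 10));   /* accepted */
	printf("0xFFFF -> %d\n", check_reg(0xFFFF, 11));   /* rejected */
	return 0;
}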
@@ -1334,6 +1344,7 @@ int r300_suspend(struct radeon_device *rdev)
1334 1344
1335void r300_fini(struct radeon_device *rdev) 1345void r300_fini(struct radeon_device *rdev)
1336{ 1346{
1347 radeon_pm_fini(rdev);
1337 r100_cp_fini(rdev); 1348 r100_cp_fini(rdev);
1338 r100_wb_fini(rdev); 1349 r100_wb_fini(rdev);
1339 r100_ib_fini(rdev); 1350 r100_ib_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c
index ea46d558e8f3..c5c2742e4140 100644
--- a/drivers/gpu/drm/radeon/r300_cmdbuf.c
+++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c
@@ -921,7 +921,7 @@ static int r300_scratch(drm_radeon_private_t *dev_priv,
921 921
922 ptr_addr = drm_buffer_read_object(cmdbuf->buffer, 922 ptr_addr = drm_buffer_read_object(cmdbuf->buffer,
923 sizeof(stack_ptr_addr), &stack_ptr_addr); 923 sizeof(stack_ptr_addr), &stack_ptr_addr);
924 ref_age_base = (u32 *)(unsigned long)*ptr_addr; 924 ref_age_base = (u32 *)(unsigned long)get_unaligned(ptr_addr);
925 925
926 for (i=0; i < header.scratch.n_bufs; i++) { 926 for (i=0; i < header.scratch.n_bufs; i++) {
927 buf_idx = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0); 927 buf_idx = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index c7593b8f58ee..c2bda4ad62e7 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -26,9 +26,11 @@
26 * Jerome Glisse 26 * Jerome Glisse
27 */ 27 */
28#include <linux/seq_file.h> 28#include <linux/seq_file.h>
29#include <linux/slab.h>
29#include "drmP.h" 30#include "drmP.h"
30#include "radeon_reg.h" 31#include "radeon_reg.h"
31#include "radeon.h" 32#include "radeon.h"
33#include "radeon_asic.h"
32#include "atom.h" 34#include "atom.h"
33#include "r100d.h" 35#include "r100d.h"
34#include "r420d.h" 36#include "r420d.h"
@@ -57,6 +59,12 @@ void r420_pipes_init(struct radeon_device *rdev)
57 /* get max number of pipes */ 59 /* get max number of pipes */
58 gb_pipe_select = RREG32(0x402C); 60 gb_pipe_select = RREG32(0x402C);
59 num_pipes = ((gb_pipe_select >> 12) & 3) + 1; 61 num_pipes = ((gb_pipe_select >> 12) & 3) + 1;
62
63 /* SE chips have 1 pipe */
64 if ((rdev->pdev->device == 0x5e4c) ||
65 (rdev->pdev->device == 0x5e4f))
66 num_pipes = 1;
67
60 rdev->num_gb_pipes = num_pipes; 68 rdev->num_gb_pipes = num_pipes;
61 tmp = 0; 69 tmp = 0;
62 switch (num_pipes) { 70 switch (num_pipes) {
@@ -266,6 +274,7 @@ int r420_suspend(struct radeon_device *rdev)
266 274
267void r420_fini(struct radeon_device *rdev) 275void r420_fini(struct radeon_device *rdev)
268{ 276{
277 radeon_pm_fini(rdev);
269 r100_cp_fini(rdev); 278 r100_cp_fini(rdev);
270 r100_wb_fini(rdev); 279 r100_wb_fini(rdev);
271 r100_ib_fini(rdev); 280 r100_ib_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index 2b8a5dd13516..3c44b8d39318 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -27,6 +27,7 @@
27 */ 27 */
28#include "drmP.h" 28#include "drmP.h"
29#include "radeon.h" 29#include "radeon.h"
30#include "radeon_asic.h"
30#include "atom.h" 31#include "atom.h"
31#include "r520d.h" 32#include "r520d.h"
32 33
@@ -121,19 +122,13 @@ static void r520_vram_get_type(struct radeon_device *rdev)
121 122
122void r520_mc_init(struct radeon_device *rdev) 123void r520_mc_init(struct radeon_device *rdev)
123{ 124{
124 fixed20_12 a;
125 125
126 r520_vram_get_type(rdev); 126 r520_vram_get_type(rdev);
127 r100_vram_init_sizes(rdev); 127 r100_vram_init_sizes(rdev);
128 radeon_vram_location(rdev, &rdev->mc, 0); 128 radeon_vram_location(rdev, &rdev->mc, 0);
129 if (!(rdev->flags & RADEON_IS_AGP)) 129 if (!(rdev->flags & RADEON_IS_AGP))
130 radeon_gtt_location(rdev, &rdev->mc); 130 radeon_gtt_location(rdev, &rdev->mc);
131 /* FIXME: we should enforce default clock in case GPU is not in 131 radeon_update_bandwidth_info(rdev);
132 * default setup
133 */
134 a.full = rfixed_const(100);
135 rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
136 rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
137} 132}
138 133
139void r520_mc_program(struct radeon_device *rdev) 134void r520_mc_program(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index c52290197292..8f3454e2056a 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -25,12 +25,14 @@
25 * Alex Deucher 25 * Alex Deucher
26 * Jerome Glisse 26 * Jerome Glisse
27 */ 27 */
28#include <linux/slab.h>
28#include <linux/seq_file.h> 29#include <linux/seq_file.h>
29#include <linux/firmware.h> 30#include <linux/firmware.h>
30#include <linux/platform_device.h> 31#include <linux/platform_device.h>
31#include "drmP.h" 32#include "drmP.h"
32#include "radeon_drm.h" 33#include "radeon_drm.h"
33#include "radeon.h" 34#include "radeon.h"
35#include "radeon_asic.h"
34#include "radeon_mode.h" 36#include "radeon_mode.h"
35#include "r600d.h" 37#include "r600d.h"
36#include "atom.h" 38#include "atom.h"
@@ -491,9 +493,9 @@ void r600_pcie_gart_disable(struct radeon_device *rdev)
491 493
492void r600_pcie_gart_fini(struct radeon_device *rdev) 494void r600_pcie_gart_fini(struct radeon_device *rdev)
493{ 495{
496 radeon_gart_fini(rdev);
494 r600_pcie_gart_disable(rdev); 497 r600_pcie_gart_disable(rdev);
495 radeon_gart_table_vram_free(rdev); 498 radeon_gart_table_vram_free(rdev);
496 radeon_gart_fini(rdev);
497} 499}
498 500
499void r600_agp_enable(struct radeon_device *rdev) 501void r600_agp_enable(struct radeon_device *rdev)
@@ -675,7 +677,6 @@ void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
675 677
676int r600_mc_init(struct radeon_device *rdev) 678int r600_mc_init(struct radeon_device *rdev)
677{ 679{
678 fixed20_12 a;
679 u32 tmp; 680 u32 tmp;
680 int chansize, numchan; 681 int chansize, numchan;
681 682
@@ -719,14 +720,10 @@ int r600_mc_init(struct radeon_device *rdev)
719 rdev->mc.real_vram_size = rdev->mc.aper_size; 720 rdev->mc.real_vram_size = rdev->mc.aper_size;
720 } 721 }
721 r600_vram_gtt_location(rdev, &rdev->mc); 722 r600_vram_gtt_location(rdev, &rdev->mc);
722 /* FIXME: we should enforce default clock in case GPU is not in 723
723 * default setup
724 */
725 a.full = rfixed_const(100);
726 rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
727 rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
728 if (rdev->flags & RADEON_IS_IGP) 724 if (rdev->flags & RADEON_IS_IGP)
729 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); 725 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
726 radeon_update_bandwidth_info(rdev);
730 return 0; 727 return 0;
731} 728}
732 729
@@ -1132,6 +1129,7 @@ void r600_gpu_init(struct radeon_device *rdev)
1132 /* Setup pipes */ 1129 /* Setup pipes */
1133 WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); 1130 WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
1134 WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); 1131 WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
1132 WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
1135 1133
1136 tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8); 1134 tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
1137 WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK); 1135 WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
@@ -2119,6 +2117,7 @@ int r600_init(struct radeon_device *rdev)
2119 2117
2120void r600_fini(struct radeon_device *rdev) 2118void r600_fini(struct radeon_device *rdev)
2121{ 2119{
2120 radeon_pm_fini(rdev);
2122 r600_audio_fini(rdev); 2121 r600_audio_fini(rdev);
2123 r600_blit_fini(rdev); 2122 r600_blit_fini(rdev);
2124 r600_cp_fini(rdev); 2123 r600_cp_fini(rdev);
@@ -2398,19 +2397,19 @@ static void r600_disable_interrupt_state(struct radeon_device *rdev)
2398 WREG32(DC_HPD4_INT_CONTROL, tmp); 2397 WREG32(DC_HPD4_INT_CONTROL, tmp);
2399 if (ASIC_IS_DCE32(rdev)) { 2398 if (ASIC_IS_DCE32(rdev)) {
2400 tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY; 2399 tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2401 WREG32(DC_HPD5_INT_CONTROL, 0); 2400 WREG32(DC_HPD5_INT_CONTROL, tmp);
2402 tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY; 2401 tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2403 WREG32(DC_HPD6_INT_CONTROL, 0); 2402 WREG32(DC_HPD6_INT_CONTROL, tmp);
2404 } 2403 }
2405 } else { 2404 } else {
2406 WREG32(DACA_AUTODETECT_INT_CONTROL, 0); 2405 WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
2407 WREG32(DACB_AUTODETECT_INT_CONTROL, 0); 2406 WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
2408 tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY; 2407 tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
2409 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, 0); 2408 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
2410 tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY; 2409 tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
2411 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, 0); 2410 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
2412 tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY; 2411 tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
2413 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, 0); 2412 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
2414 } 2413 }
2415} 2414}
2416 2415
@@ -2765,6 +2764,7 @@ restart_ih:
2765 case 0: /* D1 vblank */ 2764 case 0: /* D1 vblank */
2766 if (disp_int & LB_D1_VBLANK_INTERRUPT) { 2765 if (disp_int & LB_D1_VBLANK_INTERRUPT) {
2767 drm_handle_vblank(rdev->ddev, 0); 2766 drm_handle_vblank(rdev->ddev, 0);
2767 rdev->pm.vblank_sync = true;
2768 wake_up(&rdev->irq.vblank_queue); 2768 wake_up(&rdev->irq.vblank_queue);
2769 disp_int &= ~LB_D1_VBLANK_INTERRUPT; 2769 disp_int &= ~LB_D1_VBLANK_INTERRUPT;
2770 DRM_DEBUG("IH: D1 vblank\n"); 2770 DRM_DEBUG("IH: D1 vblank\n");
@@ -2786,6 +2786,7 @@ restart_ih:
2786 case 0: /* D2 vblank */ 2786 case 0: /* D2 vblank */
2787 if (disp_int & LB_D2_VBLANK_INTERRUPT) { 2787 if (disp_int & LB_D2_VBLANK_INTERRUPT) {
2788 drm_handle_vblank(rdev->ddev, 1); 2788 drm_handle_vblank(rdev->ddev, 1);
2789 rdev->pm.vblank_sync = true;
2789 wake_up(&rdev->irq.vblank_queue); 2790 wake_up(&rdev->irq.vblank_queue);
2790 disp_int &= ~LB_D2_VBLANK_INTERRUPT; 2791 disp_int &= ~LB_D2_VBLANK_INTERRUPT;
2791 DRM_DEBUG("IH: D2 vblank\n"); 2792 DRM_DEBUG("IH: D2 vblank\n");
@@ -2834,14 +2835,14 @@ restart_ih:
2834 break; 2835 break;
2835 case 10: 2836 case 10:
2836 if (disp_int_cont2 & DC_HPD5_INTERRUPT) { 2837 if (disp_int_cont2 & DC_HPD5_INTERRUPT) {
2837 disp_int_cont &= ~DC_HPD5_INTERRUPT; 2838 disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
2838 queue_hotplug = true; 2839 queue_hotplug = true;
2839 DRM_DEBUG("IH: HPD5\n"); 2840 DRM_DEBUG("IH: HPD5\n");
2840 } 2841 }
2841 break; 2842 break;
2842 case 12: 2843 case 12:
2843 if (disp_int_cont2 & DC_HPD6_INTERRUPT) { 2844 if (disp_int_cont2 & DC_HPD6_INTERRUPT) {
2844 disp_int_cont &= ~DC_HPD6_INTERRUPT; 2845 disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
2845 queue_hotplug = true; 2846 queue_hotplug = true;
2846 DRM_DEBUG("IH: HPD6\n"); 2847 DRM_DEBUG("IH: HPD6\n");
2847 } 2848 }
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index db928016d034..1d898051c631 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -35,7 +35,7 @@
35 */ 35 */
36static int r600_audio_chipset_supported(struct radeon_device *rdev) 36static int r600_audio_chipset_supported(struct radeon_device *rdev)
37{ 37{
38 return rdev->family >= CHIP_R600 38 return (rdev->family >= CHIP_R600 && rdev->family < CHIP_CEDAR)
39 || rdev->family == CHIP_RS600 39 || rdev->family == CHIP_RS600
40 || rdev->family == CHIP_RS690 40 || rdev->family == CHIP_RS690
41 || rdev->family == CHIP_RS740; 41 || rdev->family == CHIP_RS740;
@@ -182,41 +182,6 @@ int r600_audio_init(struct radeon_device *rdev)
182} 182}
183 183
184/* 184/*
185 * determin how the encoders and audio interface is wired together
186 */
187int r600_audio_tmds_index(struct drm_encoder *encoder)
188{
189 struct drm_device *dev = encoder->dev;
190 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
191 struct drm_encoder *other;
192
193 switch (radeon_encoder->encoder_id) {
194 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
195 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
196 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
197 return 0;
198
199 case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
200 /* special case check if an TMDS1 is present */
201 list_for_each_entry(other, &dev->mode_config.encoder_list, head) {
202 if (to_radeon_encoder(other)->encoder_id ==
203 ENCODER_OBJECT_ID_INTERNAL_TMDS1)
204 return 1;
205 }
206 return 0;
207
208 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
209 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
210 return 1;
211
212 default:
213 DRM_ERROR("Unsupported encoder type 0x%02X\n",
214 radeon_encoder->encoder_id);
215 return -1;
216 }
217}
218
219/*
220 * attach the audio codec to the clock source of the encoder 185
221 */ 186 */
222void r600_audio_set_clock(struct drm_encoder *encoder, int clock) 187void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
@@ -224,6 +189,7 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
224 struct drm_device *dev = encoder->dev; 189 struct drm_device *dev = encoder->dev;
225 struct radeon_device *rdev = dev->dev_private; 190 struct radeon_device *rdev = dev->dev_private;
226 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 191 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
192 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
227 int base_rate = 48000; 193 int base_rate = 48000;
228 194
229 switch (radeon_encoder->encoder_id) { 195 switch (radeon_encoder->encoder_id) {
@@ -231,32 +197,34 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
231 case ENCODER_OBJECT_ID_INTERNAL_LVTM1: 197 case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
232 WREG32_P(R600_AUDIO_TIMING, 0, ~0x301); 198 WREG32_P(R600_AUDIO_TIMING, 0, ~0x301);
233 break; 199 break;
234
235 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 200 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
236 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: 201 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
237 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: 202 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
238 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: 203 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
239 WREG32_P(R600_AUDIO_TIMING, 0x100, ~0x301); 204 WREG32_P(R600_AUDIO_TIMING, 0x100, ~0x301);
240 break; 205 break;
241
242 default: 206 default:
243 DRM_ERROR("Unsupported encoder type 0x%02X\n", 207 DRM_ERROR("Unsupported encoder type 0x%02X\n",
244 radeon_encoder->encoder_id); 208 radeon_encoder->encoder_id);
245 return; 209 return;
246 } 210 }
247 211
248 switch (r600_audio_tmds_index(encoder)) { 212 switch (dig->dig_encoder) {
249 case 0: 213 case 0:
250 WREG32(R600_AUDIO_PLL1_MUL, base_rate*50); 214 WREG32(R600_AUDIO_PLL1_MUL, base_rate * 50);
251 WREG32(R600_AUDIO_PLL1_DIV, clock*100); 215 WREG32(R600_AUDIO_PLL1_DIV, clock * 100);
252 WREG32(R600_AUDIO_CLK_SRCSEL, 0); 216 WREG32(R600_AUDIO_CLK_SRCSEL, 0);
253 break; 217 break;
254 218
255 case 1: 219 case 1:
256 WREG32(R600_AUDIO_PLL2_MUL, base_rate*50); 220 WREG32(R600_AUDIO_PLL2_MUL, base_rate * 50);
257 WREG32(R600_AUDIO_PLL2_DIV, clock*100); 221 WREG32(R600_AUDIO_PLL2_DIV, clock * 100);
258 WREG32(R600_AUDIO_CLK_SRCSEL, 1); 222 WREG32(R600_AUDIO_CLK_SRCSEL, 1);
259 break; 223 break;
224 default:
225 dev_err(rdev->dev, "Unsupported DIG on encoder 0x%02X\n",
226 radeon_encoder->encoder_id);
227 return;
260 } 228 }
261} 229}
262 230
diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.c b/drivers/gpu/drm/radeon/r600_blit_shaders.c
index a112c59f9d82..0271b53fa2dd 100644
--- a/drivers/gpu/drm/radeon/r600_blit_shaders.c
+++ b/drivers/gpu/drm/radeon/r600_blit_shaders.c
@@ -1,7 +1,42 @@
1/*
2 * Copyright 2009 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Alex Deucher <alexander.deucher@amd.com>
25 */
1 26
2#include <linux/types.h> 27#include <linux/types.h>
3#include <linux/kernel.h> 28#include <linux/kernel.h>
4 29
30/*
31 * R6xx+ cards need to use the 3D engine to blit data which requires
32 * quite a bit of hw state setup. Rather than pull the whole 3D driver
33 * (which normally generates the 3D state) into the DRM, we opt to use
34 * statically generated state tables. The register state and shaders
35 * were hand generated to support blitting functionality. See the 3D
36 * driver or documentation for descriptions of the registers and
37 * shader instructions.
38 */
39
5const u32 r6xx_default_state[] = 40const u32 r6xx_default_state[] =
6{ 41{
7 0xc0002400, 42 0xc0002400,
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index 40416c068d9f..68e6f4349309 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -1548,10 +1548,13 @@ static void r700_gfx_init(struct drm_device *dev,
1548 1548
1549 RADEON_WRITE(R600_CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); 1549 RADEON_WRITE(R600_CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
1550 RADEON_WRITE(R600_CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); 1550 RADEON_WRITE(R600_CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
1551 RADEON_WRITE(R600_GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
1551 1552
1552 RADEON_WRITE(R700_CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable); 1553 RADEON_WRITE(R700_CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
1553 RADEON_WRITE(R700_CGTS_SYS_TCC_DISABLE, 0); 1554 RADEON_WRITE(R700_CGTS_SYS_TCC_DISABLE, 0);
1554 RADEON_WRITE(R700_CGTS_TCC_DISABLE, 0); 1555 RADEON_WRITE(R700_CGTS_TCC_DISABLE, 0);
1556 RADEON_WRITE(R700_CGTS_USER_SYS_TCC_DISABLE, 0);
1557 RADEON_WRITE(R700_CGTS_USER_TCC_DISABLE, 0);
1555 1558
1556 num_qd_pipes = 1559 num_qd_pipes =
1557 R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & R600_INACTIVE_QD_PIPES_MASK) >> 8); 1560 R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & R600_INACTIVE_QD_PIPES_MASK) >> 8);
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index cd2c63bce501..c39c1bc13016 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -45,6 +45,7 @@ struct r600_cs_track {
45 u32 nbanks; 45 u32 nbanks;
46 u32 npipes; 46 u32 npipes;
47 /* value we track */ 47 /* value we track */
48 u32 sq_config;
48 u32 nsamples; 49 u32 nsamples;
49 u32 cb_color_base_last[8]; 50 u32 cb_color_base_last[8];
50 struct radeon_bo *cb_color_bo[8]; 51 struct radeon_bo *cb_color_bo[8];
@@ -141,6 +142,8 @@ static void r600_cs_track_init(struct r600_cs_track *track)
141{ 142{
142 int i; 143 int i;
143 144
145 /* assume DX9 mode */
146 track->sq_config = DX9_CONSTS;
144 for (i = 0; i < 8; i++) { 147 for (i = 0; i < 8; i++) {
145 track->cb_color_base_last[i] = 0; 148 track->cb_color_base_last[i] = 0;
146 track->cb_color_size[i] = 0; 149 track->cb_color_size[i] = 0;
@@ -715,6 +718,9 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx
715 tmp =radeon_get_ib_value(p, idx); 718 tmp =radeon_get_ib_value(p, idx);
716 ib[idx] = 0; 719 ib[idx] = 0;
717 break; 720 break;
721 case SQ_CONFIG:
722 track->sq_config = radeon_get_ib_value(p, idx);
723 break;
718 case R_028800_DB_DEPTH_CONTROL: 724 case R_028800_DB_DEPTH_CONTROL:
719 track->db_depth_control = radeon_get_ib_value(p, idx); 725 track->db_depth_control = radeon_get_ib_value(p, idx);
720 break; 726 break;
@@ -869,6 +875,54 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx
869 case SQ_PGM_START_VS: 875 case SQ_PGM_START_VS:
870 case SQ_PGM_START_GS: 876 case SQ_PGM_START_GS:
871 case SQ_PGM_START_PS: 877 case SQ_PGM_START_PS:
878 case SQ_ALU_CONST_CACHE_GS_0:
879 case SQ_ALU_CONST_CACHE_GS_1:
880 case SQ_ALU_CONST_CACHE_GS_2:
881 case SQ_ALU_CONST_CACHE_GS_3:
882 case SQ_ALU_CONST_CACHE_GS_4:
883 case SQ_ALU_CONST_CACHE_GS_5:
884 case SQ_ALU_CONST_CACHE_GS_6:
885 case SQ_ALU_CONST_CACHE_GS_7:
886 case SQ_ALU_CONST_CACHE_GS_8:
887 case SQ_ALU_CONST_CACHE_GS_9:
888 case SQ_ALU_CONST_CACHE_GS_10:
889 case SQ_ALU_CONST_CACHE_GS_11:
890 case SQ_ALU_CONST_CACHE_GS_12:
891 case SQ_ALU_CONST_CACHE_GS_13:
892 case SQ_ALU_CONST_CACHE_GS_14:
893 case SQ_ALU_CONST_CACHE_GS_15:
894 case SQ_ALU_CONST_CACHE_PS_0:
895 case SQ_ALU_CONST_CACHE_PS_1:
896 case SQ_ALU_CONST_CACHE_PS_2:
897 case SQ_ALU_CONST_CACHE_PS_3:
898 case SQ_ALU_CONST_CACHE_PS_4:
899 case SQ_ALU_CONST_CACHE_PS_5:
900 case SQ_ALU_CONST_CACHE_PS_6:
901 case SQ_ALU_CONST_CACHE_PS_7:
902 case SQ_ALU_CONST_CACHE_PS_8:
903 case SQ_ALU_CONST_CACHE_PS_9:
904 case SQ_ALU_CONST_CACHE_PS_10:
905 case SQ_ALU_CONST_CACHE_PS_11:
906 case SQ_ALU_CONST_CACHE_PS_12:
907 case SQ_ALU_CONST_CACHE_PS_13:
908 case SQ_ALU_CONST_CACHE_PS_14:
909 case SQ_ALU_CONST_CACHE_PS_15:
910 case SQ_ALU_CONST_CACHE_VS_0:
911 case SQ_ALU_CONST_CACHE_VS_1:
912 case SQ_ALU_CONST_CACHE_VS_2:
913 case SQ_ALU_CONST_CACHE_VS_3:
914 case SQ_ALU_CONST_CACHE_VS_4:
915 case SQ_ALU_CONST_CACHE_VS_5:
916 case SQ_ALU_CONST_CACHE_VS_6:
917 case SQ_ALU_CONST_CACHE_VS_7:
918 case SQ_ALU_CONST_CACHE_VS_8:
919 case SQ_ALU_CONST_CACHE_VS_9:
920 case SQ_ALU_CONST_CACHE_VS_10:
921 case SQ_ALU_CONST_CACHE_VS_11:
922 case SQ_ALU_CONST_CACHE_VS_12:
923 case SQ_ALU_CONST_CACHE_VS_13:
924 case SQ_ALU_CONST_CACHE_VS_14:
925 case SQ_ALU_CONST_CACHE_VS_15:
872 r = r600_cs_packet_next_reloc(p, &reloc); 926 r = r600_cs_packet_next_reloc(p, &reloc);
873 if (r) { 927 if (r) {
874 dev_warn(p->dev, "bad SET_CONTEXT_REG " 928 dev_warn(p->dev, "bad SET_CONTEXT_REG "
@@ -1226,13 +1280,15 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
1226 } 1280 }
1227 break; 1281 break;
1228 case PACKET3_SET_ALU_CONST: 1282 case PACKET3_SET_ALU_CONST:
1229 start_reg = (idx_value << 2) + PACKET3_SET_ALU_CONST_OFFSET; 1283 if (track->sq_config & DX9_CONSTS) {
1230 end_reg = 4 * pkt->count + start_reg - 4; 1284 start_reg = (idx_value << 2) + PACKET3_SET_ALU_CONST_OFFSET;
1231 if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) || 1285 end_reg = 4 * pkt->count + start_reg - 4;
1232 (start_reg >= PACKET3_SET_ALU_CONST_END) || 1286 if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) ||
1233 (end_reg >= PACKET3_SET_ALU_CONST_END)) { 1287 (start_reg >= PACKET3_SET_ALU_CONST_END) ||
1234 DRM_ERROR("bad SET_ALU_CONST\n"); 1288 (end_reg >= PACKET3_SET_ALU_CONST_END)) {
1235 return -EINVAL; 1289 DRM_ERROR("bad SET_ALU_CONST\n");
1290 return -EINVAL;
1291 }
1236 } 1292 }
1237 break; 1293 break;
1238 case PACKET3_SET_BOOL_CONST: 1294 case PACKET3_SET_BOOL_CONST:
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index fcc949df0e5d..2616b822ba68 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -42,13 +42,13 @@ enum r600_hdmi_color_format {
42 */ 42 */
43enum r600_hdmi_iec_status_bits { 43enum r600_hdmi_iec_status_bits {
44 AUDIO_STATUS_DIG_ENABLE = 0x01, 44 AUDIO_STATUS_DIG_ENABLE = 0x01,
45 AUDIO_STATUS_V = 0x02, 45 AUDIO_STATUS_V = 0x02,
46 AUDIO_STATUS_VCFG = 0x04, 46 AUDIO_STATUS_VCFG = 0x04,
47 AUDIO_STATUS_EMPHASIS = 0x08, 47 AUDIO_STATUS_EMPHASIS = 0x08,
48 AUDIO_STATUS_COPYRIGHT = 0x10, 48 AUDIO_STATUS_COPYRIGHT = 0x10,
49 AUDIO_STATUS_NONAUDIO = 0x20, 49 AUDIO_STATUS_NONAUDIO = 0x20,
50 AUDIO_STATUS_PROFESSIONAL = 0x40, 50 AUDIO_STATUS_PROFESSIONAL = 0x40,
51 AUDIO_STATUS_LEVEL = 0x80 51 AUDIO_STATUS_LEVEL = 0x80
52}; 52};
53 53
54struct { 54struct {
@@ -85,7 +85,7 @@ struct {
85static void r600_hdmi_calc_CTS(uint32_t clock, int *CTS, int N, int freq) 85static void r600_hdmi_calc_CTS(uint32_t clock, int *CTS, int N, int freq)
86{ 86{
87 if (*CTS == 0) 87 if (*CTS == 0)
88 *CTS = clock*N/(128*freq)*1000; 88 *CTS = clock * N / (128 * freq) * 1000;
89 DRM_DEBUG("Using ACR timing N=%d CTS=%d for frequency %d\n", 89 DRM_DEBUG("Using ACR timing N=%d CTS=%d for frequency %d\n",
90 N, *CTS, freq); 90 N, *CTS, freq);
91} 91}
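r600_hdmi_calc_CTS(), touched above only for spacing, derives the ACR Cycle Time Stamp when none is given: per the HDMI audio clock regeneration relationship, CTS = f_TMDS * N / (128 * fs). A standalone version of the same integer expression, where clock is the pixel/TMDS clock in kHz (hence the trailing * 1000) and freq is the audio sample rate in Hz; truncation behaves exactly as in the driver formula.

/*
 * ACR CTS from TMDS clock (kHz), N, and audio sample rate (Hz).
 */
#include <stdio.h>
#include <stdint.h>

static int calc_cts(uint32_t clock_khz, int n, int freq_hz)
{
	return (int)(clock_khz * n / (128 * freq_hz) * 1000);
}

int main(void)
{
	/* 74.25 MHz pixel clock (720p), 48 kHz audio, N = 6144 */
	printf("CTS = %d\n", calc_cts(74250, 6144, 48000));
	return 0;
}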
@@ -131,11 +131,11 @@ static void r600_hdmi_infoframe_checksum(uint8_t packetType,
131 uint8_t length, 131 uint8_t length,
132 uint8_t *frame) 132 uint8_t *frame)
133{ 133{
134 int i; 134 int i;
135 frame[0] = packetType + versionNumber + length; 135 frame[0] = packetType + versionNumber + length;
136 for (i = 1; i <= length; i++) 136 for (i = 1; i <= length; i++)
137 frame[0] += frame[i]; 137 frame[0] += frame[i];
138 frame[0] = 0x100 - frame[0]; 138 frame[0] = 0x100 - frame[0];
139} 139}
140 140
141/* 141/*
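The infoframe checksum routine reindented above sets byte 0 so that the header bytes plus all payload bytes sum to zero modulo 256. A minimal standalone version of the same scheme; the AVI-like header values in main() are illustrative only.

/*
 * Infoframe checksum: byte 0 makes the whole packet sum to 0 mod 256.
 */
#include <stdint.h>
#include <stdio.h>

static void infoframe_checksum(uint8_t type, uint8_t version,
			       uint8_t length, uint8_t *frame)
{
	int i;

	frame[0] = type + version + length;
	for (i = 1; i <= length; i++)
		frame[0] += frame[i];
	frame[0] = 0x100 - frame[0];
}

int main(void)
{
	uint8_t frame[14] = { 0 };      /* frame[0] is the checksum slot */

	frame[1] = 0x10;                /* arbitrary payload bytes */
	frame[2] = 0x28;
	infoframe_checksum(0x82, 0x02, 13, frame);
	printf("checksum = 0x%02x\n", frame[0]);
	return 0;
}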
@@ -314,6 +314,9 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
314 struct radeon_device *rdev = dev->dev_private; 314 struct radeon_device *rdev = dev->dev_private;
315 uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset; 315 uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
316 316
317 if (ASIC_IS_DCE4(rdev))
318 return;
319
317 if (!offset) 320 if (!offset)
318 return; 321 return;
319 322
@@ -417,90 +420,147 @@ void r600_hdmi_update_audio_settings(struct drm_encoder *encoder,
417 WREG32_P(offset+R600_HDMI_CNTL, 0x04000000, ~0x04000000); 420 WREG32_P(offset+R600_HDMI_CNTL, 0x04000000, ~0x04000000);
418} 421}
419 422
420/* 423static int r600_hdmi_find_free_block(struct drm_device *dev)
421 * enable/disable the HDMI engine 424{
422 */ 425 struct radeon_device *rdev = dev->dev_private;
423void r600_hdmi_enable(struct drm_encoder *encoder, int enable) 426 struct drm_encoder *encoder;
427 struct radeon_encoder *radeon_encoder;
428 bool free_blocks[3] = { true, true, true };
429
430 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
431 radeon_encoder = to_radeon_encoder(encoder);
432 switch (radeon_encoder->hdmi_offset) {
433 case R600_HDMI_BLOCK1:
434 free_blocks[0] = false;
435 break;
436 case R600_HDMI_BLOCK2:
437 free_blocks[1] = false;
438 break;
439 case R600_HDMI_BLOCK3:
440 free_blocks[2] = false;
441 break;
442 }
443 }
444
445 if (rdev->family == CHIP_RS600 || rdev->family == CHIP_RS690) {
446 return free_blocks[0] ? R600_HDMI_BLOCK1 : 0;
447 } else if (rdev->family >= CHIP_R600) {
448 if (free_blocks[0])
449 return R600_HDMI_BLOCK1;
450 else if (free_blocks[1])
451 return R600_HDMI_BLOCK2;
452 }
453 return 0;
454}
455
456static void r600_hdmi_assign_block(struct drm_encoder *encoder)
424{ 457{
425 struct drm_device *dev = encoder->dev; 458 struct drm_device *dev = encoder->dev;
426 struct radeon_device *rdev = dev->dev_private; 459 struct radeon_device *rdev = dev->dev_private;
427 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 460 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
428 uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset; 461 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
429 462
430 if (!offset) 463 if (!dig) {
464 dev_err(rdev->dev, "Enabling HDMI on non-dig encoder\n");
431 return; 465 return;
466 }
432 467
433 DRM_DEBUG("%s HDMI interface @ 0x%04X\n", enable ? "Enabling" : "Disabling", offset); 468 if (ASIC_IS_DCE4(rdev)) {
434 469 /* TODO */
435 /* some version of atombios ignore the enable HDMI flag 470 } else if (ASIC_IS_DCE3(rdev)) {
436 * so enabling/disabling HDMI was moved here for TMDS1+2 */ 471 radeon_encoder->hdmi_offset = dig->dig_encoder ?
437 switch (radeon_encoder->encoder_id) { 472 R600_HDMI_BLOCK3 : R600_HDMI_BLOCK1;
438 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: 473 if (ASIC_IS_DCE32(rdev))
439 WREG32_P(AVIVO_TMDSA_CNTL, enable ? 0x4 : 0x0, ~0x4); 474 radeon_encoder->hdmi_config_offset = dig->dig_encoder ?
440 WREG32(offset+R600_HDMI_ENABLE, enable ? 0x101 : 0x0); 475 R600_HDMI_CONFIG2 : R600_HDMI_CONFIG1;
441 break; 476 } else if (rdev->family >= CHIP_R600) {
442 477 radeon_encoder->hdmi_offset = r600_hdmi_find_free_block(dev);
443 case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
444 WREG32_P(AVIVO_LVTMA_CNTL, enable ? 0x4 : 0x0, ~0x4);
445 WREG32(offset+R600_HDMI_ENABLE, enable ? 0x105 : 0x0);
446 break;
447
448 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
449 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
450 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
451 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
452 /* This part is doubtfull in my opinion */
453 WREG32(offset+R600_HDMI_ENABLE, enable ? 0x110 : 0x0);
454 break;
455
456 default:
457 DRM_ERROR("unknown HDMI output type\n");
458 break;
459 } 478 }
460} 479}
461 480
462/* 481/*
463 * determin at which register offset the HDMI encoder is 482 * enable the HDMI engine
464 */ 483 */
465void r600_hdmi_init(struct drm_encoder *encoder) 484void r600_hdmi_enable(struct drm_encoder *encoder)
466{ 485{
486 struct drm_device *dev = encoder->dev;
487 struct radeon_device *rdev = dev->dev_private;
467 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 488 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
468 489
469 switch (radeon_encoder->encoder_id) { 490 if (ASIC_IS_DCE4(rdev))
470 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: 491 return;
471 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 492
472 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: 493 if (!radeon_encoder->hdmi_offset) {
473 radeon_encoder->hdmi_offset = R600_HDMI_TMDS1; 494 r600_hdmi_assign_block(encoder);
474 break; 495 if (!radeon_encoder->hdmi_offset) {
475 496 dev_warn(rdev->dev, "Could not find HDMI block for "
476 case ENCODER_OBJECT_ID_INTERNAL_LVTM1: 497 "0x%x encoder\n", radeon_encoder->encoder_id);
477 switch (r600_audio_tmds_index(encoder)) { 498 return;
478 case 0: 499 }
479 radeon_encoder->hdmi_offset = R600_HDMI_TMDS1; 500 }
501
502 if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) {
503 WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0x1, ~0x1);
504 } else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
505 int offset = radeon_encoder->hdmi_offset;
506 switch (radeon_encoder->encoder_id) {
507 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
508 WREG32_P(AVIVO_TMDSA_CNTL, 0x4, ~0x4);
509 WREG32(offset + R600_HDMI_ENABLE, 0x101);
480 break; 510 break;
481 case 1: 511 case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
482 radeon_encoder->hdmi_offset = R600_HDMI_TMDS2; 512 WREG32_P(AVIVO_LVTMA_CNTL, 0x4, ~0x4);
513 WREG32(offset + R600_HDMI_ENABLE, 0x105);
483 break; 514 break;
484 default: 515 default:
485 radeon_encoder->hdmi_offset = 0; 516 dev_err(rdev->dev, "Unknown HDMI output type\n");
486 break; 517 break;
487 } 518 }
488 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: 519 }
489 radeon_encoder->hdmi_offset = R600_HDMI_TMDS2; 520
490 break; 521 DRM_DEBUG("Enabling HDMI interface @ 0x%04X for encoder 0x%x\n",
522 radeon_encoder->hdmi_offset, radeon_encoder->encoder_id);
523}
524
525/*
526 * disable the HDMI engine
527 */
528void r600_hdmi_disable(struct drm_encoder *encoder)
529{
530 struct drm_device *dev = encoder->dev;
531 struct radeon_device *rdev = dev->dev_private;
532 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
491 533
492 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: 534 if (ASIC_IS_DCE4(rdev))
493 radeon_encoder->hdmi_offset = R600_HDMI_DIG; 535 return;
494 break;
495 536
496 default: 537 if (!radeon_encoder->hdmi_offset) {
497 radeon_encoder->hdmi_offset = 0; 538 dev_err(rdev->dev, "Disabling not enabled HDMI\n");
498 break; 539 return;
499 } 540 }
500 541
501 DRM_DEBUG("using HDMI engine at offset 0x%04X for encoder 0x%x\n", 542 DRM_DEBUG("Disabling HDMI interface @ 0x%04X for encoder 0x%x\n",
502 radeon_encoder->hdmi_offset, radeon_encoder->encoder_id); 543 radeon_encoder->hdmi_offset, radeon_encoder->encoder_id);
544
545 if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) {
546 WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0, ~0x1);
547 } else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
548 int offset = radeon_encoder->hdmi_offset;
549 switch (radeon_encoder->encoder_id) {
550 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
551 WREG32_P(AVIVO_TMDSA_CNTL, 0, ~0x4);
552 WREG32(offset + R600_HDMI_ENABLE, 0);
553 break;
554 case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
555 WREG32_P(AVIVO_LVTMA_CNTL, 0, ~0x4);
556 WREG32(offset + R600_HDMI_ENABLE, 0);
557 break;
558 default:
559 dev_err(rdev->dev, "Unknown HDMI output type\n");
560 break;
561 }
562 }
503 563
504 /* TODO: make this configureable */ 564 radeon_encoder->hdmi_offset = 0;
505 radeon_encoder->hdmi_audio_workaround = 0; 565 radeon_encoder->hdmi_config_offset = 0;
506} 566}
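The r600_hdmi.c change above splits the old r600_hdmi_enable(encoder, enable) entry point into separate r600_hdmi_enable() and r600_hdmi_disable() functions, both keyed on radeon_encoder->hdmi_offset and the lazily assigned HDMI block. A minimal sketch, assuming the radeon driver headers are in scope, of how a caller might drive the split interface; the dpms hook name here is illustrative only and not part of this change:

/* Illustrative only: an encoder dpms hook using the split enable/disable API. */
static void example_tmds_dpms(struct drm_encoder *encoder, int mode)
{
	if (mode == DRM_MODE_DPMS_ON)
		r600_hdmi_enable(encoder);	/* assigns an HDMI block if none is set yet */
	else
		r600_hdmi_disable(encoder);	/* clears hdmi_offset and hdmi_config_offset */
}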
diff --git a/drivers/gpu/drm/radeon/r600_reg.h b/drivers/gpu/drm/radeon/r600_reg.h
index d0e28ffdeda9..7b1d22370f6e 100644
--- a/drivers/gpu/drm/radeon/r600_reg.h
+++ b/drivers/gpu/drm/radeon/r600_reg.h
@@ -152,9 +152,9 @@
152#define R600_AUDIO_STATUS_BITS 0x73d8 152#define R600_AUDIO_STATUS_BITS 0x73d8
153 153
154/* HDMI base register addresses */ 154/* HDMI base register addresses */
155#define R600_HDMI_TMDS1 0x7400 155#define R600_HDMI_BLOCK1 0x7400
156#define R600_HDMI_TMDS2 0x7700 156#define R600_HDMI_BLOCK2 0x7700
157#define R600_HDMI_DIG 0x7800 157#define R600_HDMI_BLOCK3 0x7800
158 158
159/* HDMI registers */ 159/* HDMI registers */
160#define R600_HDMI_ENABLE 0x00 160#define R600_HDMI_ENABLE 0x00
@@ -185,4 +185,8 @@
185#define R600_HDMI_AUDIO_DEBUG_2 0xe8 185#define R600_HDMI_AUDIO_DEBUG_2 0xe8
186#define R600_HDMI_AUDIO_DEBUG_3 0xec 186#define R600_HDMI_AUDIO_DEBUG_3 0xec
187 187
188/* HDMI additional config base register addresses */
189#define R600_HDMI_CONFIG1 0x7600
190#define R600_HDMI_CONFIG2 0x7a00
191
188#endif 192#endif
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 5b2e4d442823..59c1f8793e60 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -77,6 +77,55 @@
77#define CB_COLOR0_FRAG 0x280e0 77#define CB_COLOR0_FRAG 0x280e0
78#define CB_COLOR0_MASK 0x28100 78#define CB_COLOR0_MASK 0x28100
79 79
80#define SQ_ALU_CONST_CACHE_PS_0 0x28940
81#define SQ_ALU_CONST_CACHE_PS_1 0x28944
82#define SQ_ALU_CONST_CACHE_PS_2 0x28948
83#define SQ_ALU_CONST_CACHE_PS_3 0x2894c
84#define SQ_ALU_CONST_CACHE_PS_4 0x28950
85#define SQ_ALU_CONST_CACHE_PS_5 0x28954
86#define SQ_ALU_CONST_CACHE_PS_6 0x28958
87#define SQ_ALU_CONST_CACHE_PS_7 0x2895c
88#define SQ_ALU_CONST_CACHE_PS_8 0x28960
89#define SQ_ALU_CONST_CACHE_PS_9 0x28964
90#define SQ_ALU_CONST_CACHE_PS_10 0x28968
91#define SQ_ALU_CONST_CACHE_PS_11 0x2896c
92#define SQ_ALU_CONST_CACHE_PS_12 0x28970
93#define SQ_ALU_CONST_CACHE_PS_13 0x28974
94#define SQ_ALU_CONST_CACHE_PS_14 0x28978
95#define SQ_ALU_CONST_CACHE_PS_15 0x2897c
96#define SQ_ALU_CONST_CACHE_VS_0 0x28980
97#define SQ_ALU_CONST_CACHE_VS_1 0x28984
98#define SQ_ALU_CONST_CACHE_VS_2 0x28988
99#define SQ_ALU_CONST_CACHE_VS_3 0x2898c
100#define SQ_ALU_CONST_CACHE_VS_4 0x28990
101#define SQ_ALU_CONST_CACHE_VS_5 0x28994
102#define SQ_ALU_CONST_CACHE_VS_6 0x28998
103#define SQ_ALU_CONST_CACHE_VS_7 0x2899c
104#define SQ_ALU_CONST_CACHE_VS_8 0x289a0
105#define SQ_ALU_CONST_CACHE_VS_9 0x289a4
106#define SQ_ALU_CONST_CACHE_VS_10 0x289a8
107#define SQ_ALU_CONST_CACHE_VS_11 0x289ac
108#define SQ_ALU_CONST_CACHE_VS_12 0x289b0
109#define SQ_ALU_CONST_CACHE_VS_13 0x289b4
110#define SQ_ALU_CONST_CACHE_VS_14 0x289b8
111#define SQ_ALU_CONST_CACHE_VS_15 0x289bc
112#define SQ_ALU_CONST_CACHE_GS_0 0x289c0
113#define SQ_ALU_CONST_CACHE_GS_1 0x289c4
114#define SQ_ALU_CONST_CACHE_GS_2 0x289c8
115#define SQ_ALU_CONST_CACHE_GS_3 0x289cc
116#define SQ_ALU_CONST_CACHE_GS_4 0x289d0
117#define SQ_ALU_CONST_CACHE_GS_5 0x289d4
118#define SQ_ALU_CONST_CACHE_GS_6 0x289d8
119#define SQ_ALU_CONST_CACHE_GS_7 0x289dc
120#define SQ_ALU_CONST_CACHE_GS_8 0x289e0
121#define SQ_ALU_CONST_CACHE_GS_9 0x289e4
122#define SQ_ALU_CONST_CACHE_GS_10 0x289e8
123#define SQ_ALU_CONST_CACHE_GS_11 0x289ec
124#define SQ_ALU_CONST_CACHE_GS_12 0x289f0
125#define SQ_ALU_CONST_CACHE_GS_13 0x289f4
126#define SQ_ALU_CONST_CACHE_GS_14 0x289f8
127#define SQ_ALU_CONST_CACHE_GS_15 0x289fc
128
80#define CONFIG_MEMSIZE 0x5428 129#define CONFIG_MEMSIZE 0x5428
81#define CONFIG_CNTL 0x5424 130#define CONFIG_CNTL 0x5424
82#define CP_STAT 0x8680 131#define CP_STAT 0x8680
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 829e26e8a4bb..034218c3dbbb 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -91,6 +91,8 @@ extern int radeon_tv;
91extern int radeon_new_pll; 91extern int radeon_new_pll;
92extern int radeon_dynpm; 92extern int radeon_dynpm;
93extern int radeon_audio; 93extern int radeon_audio;
94extern int radeon_disp_priority;
95extern int radeon_hw_i2c;
94 96
95/* 97/*
96 * Copy from radeon_drv.h so we don't have to include both and have conflicting 98 * Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -168,6 +170,7 @@ struct radeon_clock {
168 * Power management 170 * Power management
169 */ 171 */
170int radeon_pm_init(struct radeon_device *rdev); 172int radeon_pm_init(struct radeon_device *rdev);
173void radeon_pm_fini(struct radeon_device *rdev);
171void radeon_pm_compute_clocks(struct radeon_device *rdev); 174void radeon_pm_compute_clocks(struct radeon_device *rdev);
172void radeon_combios_get_power_modes(struct radeon_device *rdev); 175void radeon_combios_get_power_modes(struct radeon_device *rdev);
173void radeon_atombios_get_power_modes(struct radeon_device *rdev); 176void radeon_atombios_get_power_modes(struct radeon_device *rdev);
@@ -687,6 +690,7 @@ struct radeon_pm {
687 bool downclocked; 690 bool downclocked;
688 int active_crtcs; 691 int active_crtcs;
689 int req_vblank; 692 int req_vblank;
693 bool vblank_sync;
690 fixed20_12 max_bandwidth; 694 fixed20_12 max_bandwidth;
691 fixed20_12 igp_sideport_mclk; 695 fixed20_12 igp_sideport_mclk;
692 fixed20_12 igp_system_mclk; 696 fixed20_12 igp_system_mclk;
@@ -697,6 +701,7 @@ struct radeon_pm {
697 fixed20_12 ht_bandwidth; 701 fixed20_12 ht_bandwidth;
698 fixed20_12 core_bandwidth; 702 fixed20_12 core_bandwidth;
699 fixed20_12 sclk; 703 fixed20_12 sclk;
704 fixed20_12 mclk;
700 fixed20_12 needed_bandwidth; 705 fixed20_12 needed_bandwidth;
701 /* XXX: use a define for num power modes */ 706 /* XXX: use a define for num power modes */
702 struct radeon_power_state power_state[8]; 707 struct radeon_power_state power_state[8];
@@ -707,6 +712,7 @@ struct radeon_pm {
707 struct radeon_power_state *requested_power_state; 712 struct radeon_power_state *requested_power_state;
708 struct radeon_pm_clock_info *requested_clock_mode; 713 struct radeon_pm_clock_info *requested_clock_mode;
709 struct radeon_power_state *default_power_state; 714 struct radeon_power_state *default_power_state;
715 struct radeon_i2c_chan *i2c_bus;
710}; 716};
711 717
712 718
@@ -729,8 +735,6 @@ int radeon_debugfs_add_files(struct radeon_device *rdev,
729 struct drm_info_list *files, 735 struct drm_info_list *files,
730 unsigned nfiles); 736 unsigned nfiles);
731int radeon_debugfs_fence_init(struct radeon_device *rdev); 737int radeon_debugfs_fence_init(struct radeon_device *rdev);
732int r100_debugfs_rbbm_init(struct radeon_device *rdev);
733int r100_debugfs_cp_init(struct radeon_device *rdev);
734 738
735 739
736/* 740/*
@@ -782,7 +786,7 @@ struct radeon_asic {
782 int (*set_surface_reg)(struct radeon_device *rdev, int reg, 786 int (*set_surface_reg)(struct radeon_device *rdev, int reg,
783 uint32_t tiling_flags, uint32_t pitch, 787 uint32_t tiling_flags, uint32_t pitch,
784 uint32_t offset, uint32_t obj_size); 788 uint32_t offset, uint32_t obj_size);
785 int (*clear_surface_reg)(struct radeon_device *rdev, int reg); 789 void (*clear_surface_reg)(struct radeon_device *rdev, int reg);
786 void (*bandwidth_update)(struct radeon_device *rdev); 790 void (*bandwidth_update)(struct radeon_device *rdev);
787 void (*hpd_init)(struct radeon_device *rdev); 791 void (*hpd_init)(struct radeon_device *rdev);
788 void (*hpd_fini)(struct radeon_device *rdev); 792 void (*hpd_fini)(struct radeon_device *rdev);
@@ -862,6 +866,12 @@ union radeon_asic_config {
862 struct rv770_asic rv770; 866 struct rv770_asic rv770;
863}; 867};
864 868
869/*
870 * asic initialization from radeon_asic.c
871 */
872void radeon_agp_disable(struct radeon_device *rdev);
873int radeon_asic_init(struct radeon_device *rdev);
874
865 875
866/* 876/*
867 * IOCTL. 877 * IOCTL.
@@ -1172,6 +1182,8 @@ extern void radeon_gart_restore(struct radeon_device *rdev);
1172extern int radeon_modeset_init(struct radeon_device *rdev); 1182extern int radeon_modeset_init(struct radeon_device *rdev);
1173extern void radeon_modeset_fini(struct radeon_device *rdev); 1183extern void radeon_modeset_fini(struct radeon_device *rdev);
1174extern bool radeon_card_posted(struct radeon_device *rdev); 1184extern bool radeon_card_posted(struct radeon_device *rdev);
1185extern void radeon_update_bandwidth_info(struct radeon_device *rdev);
1186extern void radeon_update_display_priority(struct radeon_device *rdev);
1175extern bool radeon_boot_test_post_card(struct radeon_device *rdev); 1187extern bool radeon_boot_test_post_card(struct radeon_device *rdev);
1176extern int radeon_clocks_init(struct radeon_device *rdev); 1188extern int radeon_clocks_init(struct radeon_device *rdev);
1177extern void radeon_clocks_fini(struct radeon_device *rdev); 1189extern void radeon_clocks_fini(struct radeon_device *rdev);
@@ -1188,51 +1200,6 @@ extern int radeon_resume_kms(struct drm_device *dev);
1188extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state); 1200extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
1189 1201
1190/* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */ 1202/* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */
1191struct r100_mc_save {
1192 u32 GENMO_WT;
1193 u32 CRTC_EXT_CNTL;
1194 u32 CRTC_GEN_CNTL;
1195 u32 CRTC2_GEN_CNTL;
1196 u32 CUR_OFFSET;
1197 u32 CUR2_OFFSET;
1198};
1199extern void r100_cp_disable(struct radeon_device *rdev);
1200extern int r100_cp_init(struct radeon_device *rdev, unsigned ring_size);
1201extern void r100_cp_fini(struct radeon_device *rdev);
1202extern void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
1203extern int r100_pci_gart_init(struct radeon_device *rdev);
1204extern void r100_pci_gart_fini(struct radeon_device *rdev);
1205extern int r100_pci_gart_enable(struct radeon_device *rdev);
1206extern void r100_pci_gart_disable(struct radeon_device *rdev);
1207extern int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
1208extern int r100_debugfs_mc_info_init(struct radeon_device *rdev);
1209extern int r100_gui_wait_for_idle(struct radeon_device *rdev);
1210extern void r100_ib_fini(struct radeon_device *rdev);
1211extern int r100_ib_init(struct radeon_device *rdev);
1212extern void r100_irq_disable(struct radeon_device *rdev);
1213extern int r100_irq_set(struct radeon_device *rdev);
1214extern void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save);
1215extern void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save);
1216extern void r100_vram_init_sizes(struct radeon_device *rdev);
1217extern void r100_wb_disable(struct radeon_device *rdev);
1218extern void r100_wb_fini(struct radeon_device *rdev);
1219extern int r100_wb_init(struct radeon_device *rdev);
1220extern void r100_hdp_reset(struct radeon_device *rdev);
1221extern int r100_rb2d_reset(struct radeon_device *rdev);
1222extern int r100_cp_reset(struct radeon_device *rdev);
1223extern void r100_vga_render_disable(struct radeon_device *rdev);
1224extern int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
1225 struct radeon_cs_packet *pkt,
1226 struct radeon_bo *robj);
1227extern int r100_cs_parse_packet0(struct radeon_cs_parser *p,
1228 struct radeon_cs_packet *pkt,
1229 const unsigned *auth, unsigned n,
1230 radeon_packet0_check_t check);
1231extern int r100_cs_packet_parse(struct radeon_cs_parser *p,
1232 struct radeon_cs_packet *pkt,
1233 unsigned idx);
1234extern void r100_enable_bm(struct radeon_device *rdev);
1235extern void r100_set_common_regs(struct radeon_device *rdev);
1236 1203
1237/* rv200,rv250,rv280 */ 1204/* rv200,rv250,rv280 */
1238extern void r200_set_safe_registers(struct radeon_device *rdev); 1205extern void r200_set_safe_registers(struct radeon_device *rdev);
@@ -1322,7 +1289,8 @@ extern int r600_audio_tmds_index(struct drm_encoder *encoder);
1322extern void r600_audio_set_clock(struct drm_encoder *encoder, int clock); 1289extern void r600_audio_set_clock(struct drm_encoder *encoder, int clock);
1323extern void r600_audio_fini(struct radeon_device *rdev); 1290extern void r600_audio_fini(struct radeon_device *rdev);
1324extern void r600_hdmi_init(struct drm_encoder *encoder); 1291extern void r600_hdmi_init(struct drm_encoder *encoder);
1325extern void r600_hdmi_enable(struct drm_encoder *encoder, int enable); 1292extern void r600_hdmi_enable(struct drm_encoder *encoder);
1293extern void r600_hdmi_disable(struct drm_encoder *encoder);
1326extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode); 1294extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
1327extern int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder); 1295extern int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
1328extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder, 1296extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder,
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
index c4457791dff1..28e473f1f56f 100644
--- a/drivers/gpu/drm/radeon/radeon_agp.c
+++ b/drivers/gpu/drm/radeon/radeon_agp.c
@@ -134,12 +134,10 @@ int radeon_agp_init(struct radeon_device *rdev)
134 int ret; 134 int ret;
135 135
136 /* Acquire AGP. */ 136 /* Acquire AGP. */
137 if (!rdev->ddev->agp->acquired) { 137 ret = drm_agp_acquire(rdev->ddev);
138 ret = drm_agp_acquire(rdev->ddev); 138 if (ret) {
139 if (ret) { 139 DRM_ERROR("Unable to acquire AGP: %d\n", ret);
140 DRM_ERROR("Unable to acquire AGP: %d\n", ret); 140 return ret;
141 return ret;
142 }
143 } 141 }
144 142
145 ret = drm_agp_info(rdev->ddev, &info); 143 ret = drm_agp_info(rdev->ddev, &info);
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
new file mode 100644
index 000000000000..a4b4bc9fa322
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -0,0 +1,772 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28
29#include <linux/console.h>
30#include <drm/drmP.h>
31#include <drm/drm_crtc_helper.h>
32#include <drm/radeon_drm.h>
33#include <linux/vgaarb.h>
34#include <linux/vga_switcheroo.h>
35#include "radeon_reg.h"
36#include "radeon.h"
37#include "radeon_asic.h"
38#include "atom.h"
39
40/*
41 * Register accessor functions.
42 */
43static uint32_t radeon_invalid_rreg(struct radeon_device *rdev, uint32_t reg)
44{
45 DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
46 BUG_ON(1);
47 return 0;
48}
49
50static void radeon_invalid_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
51{
52 DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
53 reg, v);
54 BUG_ON(1);
55}
56
57static void radeon_register_accessor_init(struct radeon_device *rdev)
58{
59 rdev->mc_rreg = &radeon_invalid_rreg;
60 rdev->mc_wreg = &radeon_invalid_wreg;
61 rdev->pll_rreg = &radeon_invalid_rreg;
62 rdev->pll_wreg = &radeon_invalid_wreg;
63 rdev->pciep_rreg = &radeon_invalid_rreg;
64 rdev->pciep_wreg = &radeon_invalid_wreg;
65
66 /* Don't change order as we are overriding accessors. */
67 if (rdev->family < CHIP_RV515) {
68 rdev->pcie_reg_mask = 0xff;
69 } else {
70 rdev->pcie_reg_mask = 0x7ff;
71 }
72 /* FIXME: not sure here */
73 if (rdev->family <= CHIP_R580) {
74 rdev->pll_rreg = &r100_pll_rreg;
75 rdev->pll_wreg = &r100_pll_wreg;
76 }
77 if (rdev->family >= CHIP_R420) {
78 rdev->mc_rreg = &r420_mc_rreg;
79 rdev->mc_wreg = &r420_mc_wreg;
80 }
81 if (rdev->family >= CHIP_RV515) {
82 rdev->mc_rreg = &rv515_mc_rreg;
83 rdev->mc_wreg = &rv515_mc_wreg;
84 }
85 if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) {
86 rdev->mc_rreg = &rs400_mc_rreg;
87 rdev->mc_wreg = &rs400_mc_wreg;
88 }
89 if (rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
90 rdev->mc_rreg = &rs690_mc_rreg;
91 rdev->mc_wreg = &rs690_mc_wreg;
92 }
93 if (rdev->family == CHIP_RS600) {
94 rdev->mc_rreg = &rs600_mc_rreg;
95 rdev->mc_wreg = &rs600_mc_wreg;
96 }
97 if ((rdev->family >= CHIP_R600) && (rdev->family <= CHIP_RV740)) {
98 rdev->pciep_rreg = &r600_pciep_rreg;
99 rdev->pciep_wreg = &r600_pciep_wreg;
100 }
101}
102
103
104/* helper to disable agp */
105void radeon_agp_disable(struct radeon_device *rdev)
106{
107 rdev->flags &= ~RADEON_IS_AGP;
108 if (rdev->family >= CHIP_R600) {
109 DRM_INFO("Forcing AGP to PCIE mode\n");
110 rdev->flags |= RADEON_IS_PCIE;
111 } else if (rdev->family >= CHIP_RV515 ||
112 rdev->family == CHIP_RV380 ||
113 rdev->family == CHIP_RV410 ||
114 rdev->family == CHIP_R423) {
115 DRM_INFO("Forcing AGP to PCIE mode\n");
116 rdev->flags |= RADEON_IS_PCIE;
117 rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
118 rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
119 } else {
120 DRM_INFO("Forcing AGP to PCI mode\n");
121 rdev->flags |= RADEON_IS_PCI;
122 rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
123 rdev->asic->gart_set_page = &r100_pci_gart_set_page;
124 }
125 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
126}
127
128/*
129 * ASIC
130 */
131static struct radeon_asic r100_asic = {
132 .init = &r100_init,
133 .fini = &r100_fini,
134 .suspend = &r100_suspend,
135 .resume = &r100_resume,
136 .vga_set_state = &r100_vga_set_state,
137 .gpu_reset = &r100_gpu_reset,
138 .gart_tlb_flush = &r100_pci_gart_tlb_flush,
139 .gart_set_page = &r100_pci_gart_set_page,
140 .cp_commit = &r100_cp_commit,
141 .ring_start = &r100_ring_start,
142 .ring_test = &r100_ring_test,
143 .ring_ib_execute = &r100_ring_ib_execute,
144 .irq_set = &r100_irq_set,
145 .irq_process = &r100_irq_process,
146 .get_vblank_counter = &r100_get_vblank_counter,
147 .fence_ring_emit = &r100_fence_ring_emit,
148 .cs_parse = &r100_cs_parse,
149 .copy_blit = &r100_copy_blit,
150 .copy_dma = NULL,
151 .copy = &r100_copy_blit,
152 .get_engine_clock = &radeon_legacy_get_engine_clock,
153 .set_engine_clock = &radeon_legacy_set_engine_clock,
154 .get_memory_clock = &radeon_legacy_get_memory_clock,
155 .set_memory_clock = NULL,
156 .get_pcie_lanes = NULL,
157 .set_pcie_lanes = NULL,
158 .set_clock_gating = &radeon_legacy_set_clock_gating,
159 .set_surface_reg = r100_set_surface_reg,
160 .clear_surface_reg = r100_clear_surface_reg,
161 .bandwidth_update = &r100_bandwidth_update,
162 .hpd_init = &r100_hpd_init,
163 .hpd_fini = &r100_hpd_fini,
164 .hpd_sense = &r100_hpd_sense,
165 .hpd_set_polarity = &r100_hpd_set_polarity,
166 .ioctl_wait_idle = NULL,
167};
168
169static struct radeon_asic r200_asic = {
170 .init = &r100_init,
171 .fini = &r100_fini,
172 .suspend = &r100_suspend,
173 .resume = &r100_resume,
174 .vga_set_state = &r100_vga_set_state,
175 .gpu_reset = &r100_gpu_reset,
176 .gart_tlb_flush = &r100_pci_gart_tlb_flush,
177 .gart_set_page = &r100_pci_gart_set_page,
178 .cp_commit = &r100_cp_commit,
179 .ring_start = &r100_ring_start,
180 .ring_test = &r100_ring_test,
181 .ring_ib_execute = &r100_ring_ib_execute,
182 .irq_set = &r100_irq_set,
183 .irq_process = &r100_irq_process,
184 .get_vblank_counter = &r100_get_vblank_counter,
185 .fence_ring_emit = &r100_fence_ring_emit,
186 .cs_parse = &r100_cs_parse,
187 .copy_blit = &r100_copy_blit,
188 .copy_dma = &r200_copy_dma,
189 .copy = &r100_copy_blit,
190 .get_engine_clock = &radeon_legacy_get_engine_clock,
191 .set_engine_clock = &radeon_legacy_set_engine_clock,
192 .get_memory_clock = &radeon_legacy_get_memory_clock,
193 .set_memory_clock = NULL,
194 .set_pcie_lanes = NULL,
195 .set_clock_gating = &radeon_legacy_set_clock_gating,
196 .set_surface_reg = r100_set_surface_reg,
197 .clear_surface_reg = r100_clear_surface_reg,
198 .bandwidth_update = &r100_bandwidth_update,
199 .hpd_init = &r100_hpd_init,
200 .hpd_fini = &r100_hpd_fini,
201 .hpd_sense = &r100_hpd_sense,
202 .hpd_set_polarity = &r100_hpd_set_polarity,
203 .ioctl_wait_idle = NULL,
204};
205
206static struct radeon_asic r300_asic = {
207 .init = &r300_init,
208 .fini = &r300_fini,
209 .suspend = &r300_suspend,
210 .resume = &r300_resume,
211 .vga_set_state = &r100_vga_set_state,
212 .gpu_reset = &r300_gpu_reset,
213 .gart_tlb_flush = &r100_pci_gart_tlb_flush,
214 .gart_set_page = &r100_pci_gart_set_page,
215 .cp_commit = &r100_cp_commit,
216 .ring_start = &r300_ring_start,
217 .ring_test = &r100_ring_test,
218 .ring_ib_execute = &r100_ring_ib_execute,
219 .irq_set = &r100_irq_set,
220 .irq_process = &r100_irq_process,
221 .get_vblank_counter = &r100_get_vblank_counter,
222 .fence_ring_emit = &r300_fence_ring_emit,
223 .cs_parse = &r300_cs_parse,
224 .copy_blit = &r100_copy_blit,
225 .copy_dma = &r200_copy_dma,
226 .copy = &r100_copy_blit,
227 .get_engine_clock = &radeon_legacy_get_engine_clock,
228 .set_engine_clock = &radeon_legacy_set_engine_clock,
229 .get_memory_clock = &radeon_legacy_get_memory_clock,
230 .set_memory_clock = NULL,
231 .get_pcie_lanes = &rv370_get_pcie_lanes,
232 .set_pcie_lanes = &rv370_set_pcie_lanes,
233 .set_clock_gating = &radeon_legacy_set_clock_gating,
234 .set_surface_reg = r100_set_surface_reg,
235 .clear_surface_reg = r100_clear_surface_reg,
236 .bandwidth_update = &r100_bandwidth_update,
237 .hpd_init = &r100_hpd_init,
238 .hpd_fini = &r100_hpd_fini,
239 .hpd_sense = &r100_hpd_sense,
240 .hpd_set_polarity = &r100_hpd_set_polarity,
241 .ioctl_wait_idle = NULL,
242};
243
244static struct radeon_asic r300_asic_pcie = {
245 .init = &r300_init,
246 .fini = &r300_fini,
247 .suspend = &r300_suspend,
248 .resume = &r300_resume,
249 .vga_set_state = &r100_vga_set_state,
250 .gpu_reset = &r300_gpu_reset,
251 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
252 .gart_set_page = &rv370_pcie_gart_set_page,
253 .cp_commit = &r100_cp_commit,
254 .ring_start = &r300_ring_start,
255 .ring_test = &r100_ring_test,
256 .ring_ib_execute = &r100_ring_ib_execute,
257 .irq_set = &r100_irq_set,
258 .irq_process = &r100_irq_process,
259 .get_vblank_counter = &r100_get_vblank_counter,
260 .fence_ring_emit = &r300_fence_ring_emit,
261 .cs_parse = &r300_cs_parse,
262 .copy_blit = &r100_copy_blit,
263 .copy_dma = &r200_copy_dma,
264 .copy = &r100_copy_blit,
265 .get_engine_clock = &radeon_legacy_get_engine_clock,
266 .set_engine_clock = &radeon_legacy_set_engine_clock,
267 .get_memory_clock = &radeon_legacy_get_memory_clock,
268 .set_memory_clock = NULL,
269 .set_pcie_lanes = &rv370_set_pcie_lanes,
270 .set_clock_gating = &radeon_legacy_set_clock_gating,
271 .set_surface_reg = r100_set_surface_reg,
272 .clear_surface_reg = r100_clear_surface_reg,
273 .bandwidth_update = &r100_bandwidth_update,
274 .hpd_init = &r100_hpd_init,
275 .hpd_fini = &r100_hpd_fini,
276 .hpd_sense = &r100_hpd_sense,
277 .hpd_set_polarity = &r100_hpd_set_polarity,
278 .ioctl_wait_idle = NULL,
279};
280
281static struct radeon_asic r420_asic = {
282 .init = &r420_init,
283 .fini = &r420_fini,
284 .suspend = &r420_suspend,
285 .resume = &r420_resume,
286 .vga_set_state = &r100_vga_set_state,
287 .gpu_reset = &r300_gpu_reset,
288 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
289 .gart_set_page = &rv370_pcie_gart_set_page,
290 .cp_commit = &r100_cp_commit,
291 .ring_start = &r300_ring_start,
292 .ring_test = &r100_ring_test,
293 .ring_ib_execute = &r100_ring_ib_execute,
294 .irq_set = &r100_irq_set,
295 .irq_process = &r100_irq_process,
296 .get_vblank_counter = &r100_get_vblank_counter,
297 .fence_ring_emit = &r300_fence_ring_emit,
298 .cs_parse = &r300_cs_parse,
299 .copy_blit = &r100_copy_blit,
300 .copy_dma = &r200_copy_dma,
301 .copy = &r100_copy_blit,
302 .get_engine_clock = &radeon_atom_get_engine_clock,
303 .set_engine_clock = &radeon_atom_set_engine_clock,
304 .get_memory_clock = &radeon_atom_get_memory_clock,
305 .set_memory_clock = &radeon_atom_set_memory_clock,
306 .get_pcie_lanes = &rv370_get_pcie_lanes,
307 .set_pcie_lanes = &rv370_set_pcie_lanes,
308 .set_clock_gating = &radeon_atom_set_clock_gating,
309 .set_surface_reg = r100_set_surface_reg,
310 .clear_surface_reg = r100_clear_surface_reg,
311 .bandwidth_update = &r100_bandwidth_update,
312 .hpd_init = &r100_hpd_init,
313 .hpd_fini = &r100_hpd_fini,
314 .hpd_sense = &r100_hpd_sense,
315 .hpd_set_polarity = &r100_hpd_set_polarity,
316 .ioctl_wait_idle = NULL,
317};
318
319static struct radeon_asic rs400_asic = {
320 .init = &rs400_init,
321 .fini = &rs400_fini,
322 .suspend = &rs400_suspend,
323 .resume = &rs400_resume,
324 .vga_set_state = &r100_vga_set_state,
325 .gpu_reset = &r300_gpu_reset,
326 .gart_tlb_flush = &rs400_gart_tlb_flush,
327 .gart_set_page = &rs400_gart_set_page,
328 .cp_commit = &r100_cp_commit,
329 .ring_start = &r300_ring_start,
330 .ring_test = &r100_ring_test,
331 .ring_ib_execute = &r100_ring_ib_execute,
332 .irq_set = &r100_irq_set,
333 .irq_process = &r100_irq_process,
334 .get_vblank_counter = &r100_get_vblank_counter,
335 .fence_ring_emit = &r300_fence_ring_emit,
336 .cs_parse = &r300_cs_parse,
337 .copy_blit = &r100_copy_blit,
338 .copy_dma = &r200_copy_dma,
339 .copy = &r100_copy_blit,
340 .get_engine_clock = &radeon_legacy_get_engine_clock,
341 .set_engine_clock = &radeon_legacy_set_engine_clock,
342 .get_memory_clock = &radeon_legacy_get_memory_clock,
343 .set_memory_clock = NULL,
344 .get_pcie_lanes = NULL,
345 .set_pcie_lanes = NULL,
346 .set_clock_gating = &radeon_legacy_set_clock_gating,
347 .set_surface_reg = r100_set_surface_reg,
348 .clear_surface_reg = r100_clear_surface_reg,
349 .bandwidth_update = &r100_bandwidth_update,
350 .hpd_init = &r100_hpd_init,
351 .hpd_fini = &r100_hpd_fini,
352 .hpd_sense = &r100_hpd_sense,
353 .hpd_set_polarity = &r100_hpd_set_polarity,
354 .ioctl_wait_idle = NULL,
355};
356
357static struct radeon_asic rs600_asic = {
358 .init = &rs600_init,
359 .fini = &rs600_fini,
360 .suspend = &rs600_suspend,
361 .resume = &rs600_resume,
362 .vga_set_state = &r100_vga_set_state,
363 .gpu_reset = &r300_gpu_reset,
364 .gart_tlb_flush = &rs600_gart_tlb_flush,
365 .gart_set_page = &rs600_gart_set_page,
366 .cp_commit = &r100_cp_commit,
367 .ring_start = &r300_ring_start,
368 .ring_test = &r100_ring_test,
369 .ring_ib_execute = &r100_ring_ib_execute,
370 .irq_set = &rs600_irq_set,
371 .irq_process = &rs600_irq_process,
372 .get_vblank_counter = &rs600_get_vblank_counter,
373 .fence_ring_emit = &r300_fence_ring_emit,
374 .cs_parse = &r300_cs_parse,
375 .copy_blit = &r100_copy_blit,
376 .copy_dma = &r200_copy_dma,
377 .copy = &r100_copy_blit,
378 .get_engine_clock = &radeon_atom_get_engine_clock,
379 .set_engine_clock = &radeon_atom_set_engine_clock,
380 .get_memory_clock = &radeon_atom_get_memory_clock,
381 .set_memory_clock = &radeon_atom_set_memory_clock,
382 .get_pcie_lanes = NULL,
383 .set_pcie_lanes = NULL,
384 .set_clock_gating = &radeon_atom_set_clock_gating,
385 .set_surface_reg = r100_set_surface_reg,
386 .clear_surface_reg = r100_clear_surface_reg,
387 .bandwidth_update = &rs600_bandwidth_update,
388 .hpd_init = &rs600_hpd_init,
389 .hpd_fini = &rs600_hpd_fini,
390 .hpd_sense = &rs600_hpd_sense,
391 .hpd_set_polarity = &rs600_hpd_set_polarity,
392 .ioctl_wait_idle = NULL,
393};
394
395static struct radeon_asic rs690_asic = {
396 .init = &rs690_init,
397 .fini = &rs690_fini,
398 .suspend = &rs690_suspend,
399 .resume = &rs690_resume,
400 .vga_set_state = &r100_vga_set_state,
401 .gpu_reset = &r300_gpu_reset,
402 .gart_tlb_flush = &rs400_gart_tlb_flush,
403 .gart_set_page = &rs400_gart_set_page,
404 .cp_commit = &r100_cp_commit,
405 .ring_start = &r300_ring_start,
406 .ring_test = &r100_ring_test,
407 .ring_ib_execute = &r100_ring_ib_execute,
408 .irq_set = &rs600_irq_set,
409 .irq_process = &rs600_irq_process,
410 .get_vblank_counter = &rs600_get_vblank_counter,
411 .fence_ring_emit = &r300_fence_ring_emit,
412 .cs_parse = &r300_cs_parse,
413 .copy_blit = &r100_copy_blit,
414 .copy_dma = &r200_copy_dma,
415 .copy = &r200_copy_dma,
416 .get_engine_clock = &radeon_atom_get_engine_clock,
417 .set_engine_clock = &radeon_atom_set_engine_clock,
418 .get_memory_clock = &radeon_atom_get_memory_clock,
419 .set_memory_clock = &radeon_atom_set_memory_clock,
420 .get_pcie_lanes = NULL,
421 .set_pcie_lanes = NULL,
422 .set_clock_gating = &radeon_atom_set_clock_gating,
423 .set_surface_reg = r100_set_surface_reg,
424 .clear_surface_reg = r100_clear_surface_reg,
425 .bandwidth_update = &rs690_bandwidth_update,
426 .hpd_init = &rs600_hpd_init,
427 .hpd_fini = &rs600_hpd_fini,
428 .hpd_sense = &rs600_hpd_sense,
429 .hpd_set_polarity = &rs600_hpd_set_polarity,
430 .ioctl_wait_idle = NULL,
431};
432
433static struct radeon_asic rv515_asic = {
434 .init = &rv515_init,
435 .fini = &rv515_fini,
436 .suspend = &rv515_suspend,
437 .resume = &rv515_resume,
438 .vga_set_state = &r100_vga_set_state,
439 .gpu_reset = &rv515_gpu_reset,
440 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
441 .gart_set_page = &rv370_pcie_gart_set_page,
442 .cp_commit = &r100_cp_commit,
443 .ring_start = &rv515_ring_start,
444 .ring_test = &r100_ring_test,
445 .ring_ib_execute = &r100_ring_ib_execute,
446 .irq_set = &rs600_irq_set,
447 .irq_process = &rs600_irq_process,
448 .get_vblank_counter = &rs600_get_vblank_counter,
449 .fence_ring_emit = &r300_fence_ring_emit,
450 .cs_parse = &r300_cs_parse,
451 .copy_blit = &r100_copy_blit,
452 .copy_dma = &r200_copy_dma,
453 .copy = &r100_copy_blit,
454 .get_engine_clock = &radeon_atom_get_engine_clock,
455 .set_engine_clock = &radeon_atom_set_engine_clock,
456 .get_memory_clock = &radeon_atom_get_memory_clock,
457 .set_memory_clock = &radeon_atom_set_memory_clock,
458 .get_pcie_lanes = &rv370_get_pcie_lanes,
459 .set_pcie_lanes = &rv370_set_pcie_lanes,
460 .set_clock_gating = &radeon_atom_set_clock_gating,
461 .set_surface_reg = r100_set_surface_reg,
462 .clear_surface_reg = r100_clear_surface_reg,
463 .bandwidth_update = &rv515_bandwidth_update,
464 .hpd_init = &rs600_hpd_init,
465 .hpd_fini = &rs600_hpd_fini,
466 .hpd_sense = &rs600_hpd_sense,
467 .hpd_set_polarity = &rs600_hpd_set_polarity,
468 .ioctl_wait_idle = NULL,
469};
470
471static struct radeon_asic r520_asic = {
472 .init = &r520_init,
473 .fini = &rv515_fini,
474 .suspend = &rv515_suspend,
475 .resume = &r520_resume,
476 .vga_set_state = &r100_vga_set_state,
477 .gpu_reset = &rv515_gpu_reset,
478 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
479 .gart_set_page = &rv370_pcie_gart_set_page,
480 .cp_commit = &r100_cp_commit,
481 .ring_start = &rv515_ring_start,
482 .ring_test = &r100_ring_test,
483 .ring_ib_execute = &r100_ring_ib_execute,
484 .irq_set = &rs600_irq_set,
485 .irq_process = &rs600_irq_process,
486 .get_vblank_counter = &rs600_get_vblank_counter,
487 .fence_ring_emit = &r300_fence_ring_emit,
488 .cs_parse = &r300_cs_parse,
489 .copy_blit = &r100_copy_blit,
490 .copy_dma = &r200_copy_dma,
491 .copy = &r100_copy_blit,
492 .get_engine_clock = &radeon_atom_get_engine_clock,
493 .set_engine_clock = &radeon_atom_set_engine_clock,
494 .get_memory_clock = &radeon_atom_get_memory_clock,
495 .set_memory_clock = &radeon_atom_set_memory_clock,
496 .get_pcie_lanes = &rv370_get_pcie_lanes,
497 .set_pcie_lanes = &rv370_set_pcie_lanes,
498 .set_clock_gating = &radeon_atom_set_clock_gating,
499 .set_surface_reg = r100_set_surface_reg,
500 .clear_surface_reg = r100_clear_surface_reg,
501 .bandwidth_update = &rv515_bandwidth_update,
502 .hpd_init = &rs600_hpd_init,
503 .hpd_fini = &rs600_hpd_fini,
504 .hpd_sense = &rs600_hpd_sense,
505 .hpd_set_polarity = &rs600_hpd_set_polarity,
506 .ioctl_wait_idle = NULL,
507};
508
509static struct radeon_asic r600_asic = {
510 .init = &r600_init,
511 .fini = &r600_fini,
512 .suspend = &r600_suspend,
513 .resume = &r600_resume,
514 .cp_commit = &r600_cp_commit,
515 .vga_set_state = &r600_vga_set_state,
516 .gpu_reset = &r600_gpu_reset,
517 .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
518 .gart_set_page = &rs600_gart_set_page,
519 .ring_test = &r600_ring_test,
520 .ring_ib_execute = &r600_ring_ib_execute,
521 .irq_set = &r600_irq_set,
522 .irq_process = &r600_irq_process,
523 .get_vblank_counter = &rs600_get_vblank_counter,
524 .fence_ring_emit = &r600_fence_ring_emit,
525 .cs_parse = &r600_cs_parse,
526 .copy_blit = &r600_copy_blit,
527 .copy_dma = &r600_copy_blit,
528 .copy = &r600_copy_blit,
529 .get_engine_clock = &radeon_atom_get_engine_clock,
530 .set_engine_clock = &radeon_atom_set_engine_clock,
531 .get_memory_clock = &radeon_atom_get_memory_clock,
532 .set_memory_clock = &radeon_atom_set_memory_clock,
533 .get_pcie_lanes = &rv370_get_pcie_lanes,
534 .set_pcie_lanes = NULL,
535 .set_clock_gating = NULL,
536 .set_surface_reg = r600_set_surface_reg,
537 .clear_surface_reg = r600_clear_surface_reg,
538 .bandwidth_update = &rv515_bandwidth_update,
539 .hpd_init = &r600_hpd_init,
540 .hpd_fini = &r600_hpd_fini,
541 .hpd_sense = &r600_hpd_sense,
542 .hpd_set_polarity = &r600_hpd_set_polarity,
543 .ioctl_wait_idle = r600_ioctl_wait_idle,
544};
545
546static struct radeon_asic rs780_asic = {
547 .init = &r600_init,
548 .fini = &r600_fini,
549 .suspend = &r600_suspend,
550 .resume = &r600_resume,
551 .cp_commit = &r600_cp_commit,
552 .vga_set_state = &r600_vga_set_state,
553 .gpu_reset = &r600_gpu_reset,
554 .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
555 .gart_set_page = &rs600_gart_set_page,
556 .ring_test = &r600_ring_test,
557 .ring_ib_execute = &r600_ring_ib_execute,
558 .irq_set = &r600_irq_set,
559 .irq_process = &r600_irq_process,
560 .get_vblank_counter = &rs600_get_vblank_counter,
561 .fence_ring_emit = &r600_fence_ring_emit,
562 .cs_parse = &r600_cs_parse,
563 .copy_blit = &r600_copy_blit,
564 .copy_dma = &r600_copy_blit,
565 .copy = &r600_copy_blit,
566 .get_engine_clock = &radeon_atom_get_engine_clock,
567 .set_engine_clock = &radeon_atom_set_engine_clock,
568 .get_memory_clock = NULL,
569 .set_memory_clock = NULL,
570 .get_pcie_lanes = NULL,
571 .set_pcie_lanes = NULL,
572 .set_clock_gating = NULL,
573 .set_surface_reg = r600_set_surface_reg,
574 .clear_surface_reg = r600_clear_surface_reg,
575 .bandwidth_update = &rs690_bandwidth_update,
576 .hpd_init = &r600_hpd_init,
577 .hpd_fini = &r600_hpd_fini,
578 .hpd_sense = &r600_hpd_sense,
579 .hpd_set_polarity = &r600_hpd_set_polarity,
580 .ioctl_wait_idle = r600_ioctl_wait_idle,
581};
582
583static struct radeon_asic rv770_asic = {
584 .init = &rv770_init,
585 .fini = &rv770_fini,
586 .suspend = &rv770_suspend,
587 .resume = &rv770_resume,
588 .cp_commit = &r600_cp_commit,
589 .gpu_reset = &rv770_gpu_reset,
590 .vga_set_state = &r600_vga_set_state,
591 .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
592 .gart_set_page = &rs600_gart_set_page,
593 .ring_test = &r600_ring_test,
594 .ring_ib_execute = &r600_ring_ib_execute,
595 .irq_set = &r600_irq_set,
596 .irq_process = &r600_irq_process,
597 .get_vblank_counter = &rs600_get_vblank_counter,
598 .fence_ring_emit = &r600_fence_ring_emit,
599 .cs_parse = &r600_cs_parse,
600 .copy_blit = &r600_copy_blit,
601 .copy_dma = &r600_copy_blit,
602 .copy = &r600_copy_blit,
603 .get_engine_clock = &radeon_atom_get_engine_clock,
604 .set_engine_clock = &radeon_atom_set_engine_clock,
605 .get_memory_clock = &radeon_atom_get_memory_clock,
606 .set_memory_clock = &radeon_atom_set_memory_clock,
607 .get_pcie_lanes = &rv370_get_pcie_lanes,
608 .set_pcie_lanes = NULL,
609 .set_clock_gating = &radeon_atom_set_clock_gating,
610 .set_surface_reg = r600_set_surface_reg,
611 .clear_surface_reg = r600_clear_surface_reg,
612 .bandwidth_update = &rv515_bandwidth_update,
613 .hpd_init = &r600_hpd_init,
614 .hpd_fini = &r600_hpd_fini,
615 .hpd_sense = &r600_hpd_sense,
616 .hpd_set_polarity = &r600_hpd_set_polarity,
617 .ioctl_wait_idle = r600_ioctl_wait_idle,
618};
619
620static struct radeon_asic evergreen_asic = {
621 .init = &evergreen_init,
622 .fini = &evergreen_fini,
623 .suspend = &evergreen_suspend,
624 .resume = &evergreen_resume,
625 .cp_commit = NULL,
626 .gpu_reset = &evergreen_gpu_reset,
627 .vga_set_state = &r600_vga_set_state,
628 .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
629 .gart_set_page = &rs600_gart_set_page,
630 .ring_test = NULL,
631 .ring_ib_execute = NULL,
632 .irq_set = NULL,
633 .irq_process = NULL,
634 .get_vblank_counter = NULL,
635 .fence_ring_emit = NULL,
636 .cs_parse = NULL,
637 .copy_blit = NULL,
638 .copy_dma = NULL,
639 .copy = NULL,
640 .get_engine_clock = &radeon_atom_get_engine_clock,
641 .set_engine_clock = &radeon_atom_set_engine_clock,
642 .get_memory_clock = &radeon_atom_get_memory_clock,
643 .set_memory_clock = &radeon_atom_set_memory_clock,
644 .set_pcie_lanes = NULL,
645 .set_clock_gating = NULL,
646 .set_surface_reg = r600_set_surface_reg,
647 .clear_surface_reg = r600_clear_surface_reg,
648 .bandwidth_update = &evergreen_bandwidth_update,
649 .hpd_init = &evergreen_hpd_init,
650 .hpd_fini = &evergreen_hpd_fini,
651 .hpd_sense = &evergreen_hpd_sense,
652 .hpd_set_polarity = &evergreen_hpd_set_polarity,
653};
654
655int radeon_asic_init(struct radeon_device *rdev)
656{
657 radeon_register_accessor_init(rdev);
658 switch (rdev->family) {
659 case CHIP_R100:
660 case CHIP_RV100:
661 case CHIP_RS100:
662 case CHIP_RV200:
663 case CHIP_RS200:
664 rdev->asic = &r100_asic;
665 break;
666 case CHIP_R200:
667 case CHIP_RV250:
668 case CHIP_RS300:
669 case CHIP_RV280:
670 rdev->asic = &r200_asic;
671 break;
672 case CHIP_R300:
673 case CHIP_R350:
674 case CHIP_RV350:
675 case CHIP_RV380:
676 if (rdev->flags & RADEON_IS_PCIE)
677 rdev->asic = &r300_asic_pcie;
678 else
679 rdev->asic = &r300_asic;
680 break;
681 case CHIP_R420:
682 case CHIP_R423:
683 case CHIP_RV410:
684 rdev->asic = &r420_asic;
685 break;
686 case CHIP_RS400:
687 case CHIP_RS480:
688 rdev->asic = &rs400_asic;
689 break;
690 case CHIP_RS600:
691 rdev->asic = &rs600_asic;
692 break;
693 case CHIP_RS690:
694 case CHIP_RS740:
695 rdev->asic = &rs690_asic;
696 break;
697 case CHIP_RV515:
698 rdev->asic = &rv515_asic;
699 break;
700 case CHIP_R520:
701 case CHIP_RV530:
702 case CHIP_RV560:
703 case CHIP_RV570:
704 case CHIP_R580:
705 rdev->asic = &r520_asic;
706 break;
707 case CHIP_R600:
708 case CHIP_RV610:
709 case CHIP_RV630:
710 case CHIP_RV620:
711 case CHIP_RV635:
712 case CHIP_RV670:
713 rdev->asic = &r600_asic;
714 break;
715 case CHIP_RS780:
716 case CHIP_RS880:
717 rdev->asic = &rs780_asic;
718 break;
719 case CHIP_RV770:
720 case CHIP_RV730:
721 case CHIP_RV710:
722 case CHIP_RV740:
723 rdev->asic = &rv770_asic;
724 break;
725 case CHIP_CEDAR:
726 case CHIP_REDWOOD:
727 case CHIP_JUNIPER:
728 case CHIP_CYPRESS:
729 case CHIP_HEMLOCK:
730 rdev->asic = &evergreen_asic;
731 break;
732 default:
733 /* FIXME: not supported yet */
734 return -EINVAL;
735 }
736
737 if (rdev->flags & RADEON_IS_IGP) {
738 rdev->asic->get_memory_clock = NULL;
739 rdev->asic->set_memory_clock = NULL;
740 }
741
742 /* set the number of crtcs */
743 if (rdev->flags & RADEON_SINGLE_CRTC)
744 rdev->num_crtc = 1;
745 else {
746 if (ASIC_IS_DCE4(rdev))
747 rdev->num_crtc = 6;
748 else
749 rdev->num_crtc = 2;
750 }
751
752 return 0;
753}
754
755/*
756 * Wrapper around modesetting bits. Move to radeon_clocks.c?
757 */
758int radeon_clocks_init(struct radeon_device *rdev)
759{
760 int r;
761
762 r = radeon_static_clocks_init(rdev->ddev);
763 if (r) {
764 return r;
765 }
766 DRM_INFO("Clocks initialized !\n");
767 return 0;
768}
769
770void radeon_clocks_fini(struct radeon_device *rdev)
771{
772}
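radeon_asic_init() above picks one of the static struct radeon_asic tables based on rdev->family, so the rest of the driver reaches the hardware only through the rdev->asic function pointers. A minimal sketch of that dispatch pattern, assuming the radeon driver headers are in scope; the wrapper name is illustrative and not something this change adds:

/* Illustrative wrapper: calls go through the per-ASIC function-pointer table. */
static inline void example_gart_tlb_flush(struct radeon_device *rdev)
{
	/* Resolves to r100_pci_gart_tlb_flush on r100, r600_pcie_gart_tlb_flush on r600, etc. */
	rdev->asic->gart_tlb_flush(rdev);
}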
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index d3a157b2bcb7..a0b8280663d1 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -45,10 +45,18 @@ void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
45/* 45/*
46 * r100,rv100,rs100,rv200,rs200 46 * r100,rv100,rs100,rv200,rs200
47 */ 47 */
48extern int r100_init(struct radeon_device *rdev); 48struct r100_mc_save {
49extern void r100_fini(struct radeon_device *rdev); 49 u32 GENMO_WT;
50extern int r100_suspend(struct radeon_device *rdev); 50 u32 CRTC_EXT_CNTL;
51extern int r100_resume(struct radeon_device *rdev); 51 u32 CRTC_GEN_CNTL;
52 u32 CRTC2_GEN_CNTL;
53 u32 CUR_OFFSET;
54 u32 CUR2_OFFSET;
55};
56int r100_init(struct radeon_device *rdev);
57void r100_fini(struct radeon_device *rdev);
58int r100_suspend(struct radeon_device *rdev);
59int r100_resume(struct radeon_device *rdev);
52uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg); 60uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg);
53void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); 61void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
54void r100_vga_set_state(struct radeon_device *rdev, bool state); 62void r100_vga_set_state(struct radeon_device *rdev, bool state);
@@ -73,7 +81,7 @@ int r100_copy_blit(struct radeon_device *rdev,
73int r100_set_surface_reg(struct radeon_device *rdev, int reg, 81int r100_set_surface_reg(struct radeon_device *rdev, int reg,
74 uint32_t tiling_flags, uint32_t pitch, 82 uint32_t tiling_flags, uint32_t pitch,
75 uint32_t offset, uint32_t obj_size); 83 uint32_t offset, uint32_t obj_size);
76int r100_clear_surface_reg(struct radeon_device *rdev, int reg); 84void r100_clear_surface_reg(struct radeon_device *rdev, int reg);
77void r100_bandwidth_update(struct radeon_device *rdev); 85void r100_bandwidth_update(struct radeon_device *rdev);
78void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); 86void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
79int r100_ring_test(struct radeon_device *rdev); 87int r100_ring_test(struct radeon_device *rdev);
@@ -82,44 +90,42 @@ void r100_hpd_fini(struct radeon_device *rdev);
82bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); 90bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
83void r100_hpd_set_polarity(struct radeon_device *rdev, 91void r100_hpd_set_polarity(struct radeon_device *rdev,
84 enum radeon_hpd_id hpd); 92 enum radeon_hpd_id hpd);
85 93int r100_debugfs_rbbm_init(struct radeon_device *rdev);
86static struct radeon_asic r100_asic = { 94int r100_debugfs_cp_init(struct radeon_device *rdev);
87 .init = &r100_init, 95void r100_cp_disable(struct radeon_device *rdev);
88 .fini = &r100_fini, 96int r100_cp_init(struct radeon_device *rdev, unsigned ring_size);
89 .suspend = &r100_suspend, 97void r100_cp_fini(struct radeon_device *rdev);
90 .resume = &r100_resume, 98int r100_pci_gart_init(struct radeon_device *rdev);
91 .vga_set_state = &r100_vga_set_state, 99void r100_pci_gart_fini(struct radeon_device *rdev);
92 .gpu_reset = &r100_gpu_reset, 100int r100_pci_gart_enable(struct radeon_device *rdev);
93 .gart_tlb_flush = &r100_pci_gart_tlb_flush, 101void r100_pci_gart_disable(struct radeon_device *rdev);
94 .gart_set_page = &r100_pci_gart_set_page, 102int r100_debugfs_mc_info_init(struct radeon_device *rdev);
95 .cp_commit = &r100_cp_commit, 103int r100_gui_wait_for_idle(struct radeon_device *rdev);
96 .ring_start = &r100_ring_start, 104void r100_ib_fini(struct radeon_device *rdev);
97 .ring_test = &r100_ring_test, 105int r100_ib_init(struct radeon_device *rdev);
98 .ring_ib_execute = &r100_ring_ib_execute, 106void r100_irq_disable(struct radeon_device *rdev);
99 .irq_set = &r100_irq_set, 107void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save);
100 .irq_process = &r100_irq_process, 108void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save);
101 .get_vblank_counter = &r100_get_vblank_counter, 109void r100_vram_init_sizes(struct radeon_device *rdev);
102 .fence_ring_emit = &r100_fence_ring_emit, 110void r100_wb_disable(struct radeon_device *rdev);
103 .cs_parse = &r100_cs_parse, 111void r100_wb_fini(struct radeon_device *rdev);
104 .copy_blit = &r100_copy_blit, 112int r100_wb_init(struct radeon_device *rdev);
105 .copy_dma = NULL, 113void r100_hdp_reset(struct radeon_device *rdev);
106 .copy = &r100_copy_blit, 114int r100_rb2d_reset(struct radeon_device *rdev);
107 .get_engine_clock = &radeon_legacy_get_engine_clock, 115int r100_cp_reset(struct radeon_device *rdev);
108 .set_engine_clock = &radeon_legacy_set_engine_clock, 116void r100_vga_render_disable(struct radeon_device *rdev);
109 .get_memory_clock = &radeon_legacy_get_memory_clock, 117int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
110 .set_memory_clock = NULL, 118 struct radeon_cs_packet *pkt,
111 .get_pcie_lanes = NULL, 119 struct radeon_bo *robj);
112 .set_pcie_lanes = NULL, 120int r100_cs_parse_packet0(struct radeon_cs_parser *p,
113 .set_clock_gating = &radeon_legacy_set_clock_gating, 121 struct radeon_cs_packet *pkt,
114 .set_surface_reg = r100_set_surface_reg, 122 const unsigned *auth, unsigned n,
115 .clear_surface_reg = r100_clear_surface_reg, 123 radeon_packet0_check_t check);
116 .bandwidth_update = &r100_bandwidth_update, 124int r100_cs_packet_parse(struct radeon_cs_parser *p,
117 .hpd_init = &r100_hpd_init, 125 struct radeon_cs_packet *pkt,
118 .hpd_fini = &r100_hpd_fini, 126 unsigned idx);
119 .hpd_sense = &r100_hpd_sense, 127void r100_enable_bm(struct radeon_device *rdev);
120 .hpd_set_polarity = &r100_hpd_set_polarity, 128void r100_set_common_regs(struct radeon_device *rdev);
121 .ioctl_wait_idle = NULL,
122};
123 129
124/* 130/*
125 * r200,rv250,rs300,rv280 131 * r200,rv250,rs300,rv280
@@ -129,43 +135,6 @@ extern int r200_copy_dma(struct radeon_device *rdev,
129 uint64_t dst_offset, 135 uint64_t dst_offset,
130 unsigned num_pages, 136 unsigned num_pages,
131 struct radeon_fence *fence); 137 struct radeon_fence *fence);
132static struct radeon_asic r200_asic = {
133 .init = &r100_init,
134 .fini = &r100_fini,
135 .suspend = &r100_suspend,
136 .resume = &r100_resume,
137 .vga_set_state = &r100_vga_set_state,
138 .gpu_reset = &r100_gpu_reset,
139 .gart_tlb_flush = &r100_pci_gart_tlb_flush,
140 .gart_set_page = &r100_pci_gart_set_page,
141 .cp_commit = &r100_cp_commit,
142 .ring_start = &r100_ring_start,
143 .ring_test = &r100_ring_test,
144 .ring_ib_execute = &r100_ring_ib_execute,
145 .irq_set = &r100_irq_set,
146 .irq_process = &r100_irq_process,
147 .get_vblank_counter = &r100_get_vblank_counter,
148 .fence_ring_emit = &r100_fence_ring_emit,
149 .cs_parse = &r100_cs_parse,
150 .copy_blit = &r100_copy_blit,
151 .copy_dma = &r200_copy_dma,
152 .copy = &r100_copy_blit,
153 .get_engine_clock = &radeon_legacy_get_engine_clock,
154 .set_engine_clock = &radeon_legacy_set_engine_clock,
155 .get_memory_clock = &radeon_legacy_get_memory_clock,
156 .set_memory_clock = NULL,
157 .set_pcie_lanes = NULL,
158 .set_clock_gating = &radeon_legacy_set_clock_gating,
159 .set_surface_reg = r100_set_surface_reg,
160 .clear_surface_reg = r100_clear_surface_reg,
161 .bandwidth_update = &r100_bandwidth_update,
162 .hpd_init = &r100_hpd_init,
163 .hpd_fini = &r100_hpd_fini,
164 .hpd_sense = &r100_hpd_sense,
165 .hpd_set_polarity = &r100_hpd_set_polarity,
166 .ioctl_wait_idle = NULL,
167};
168
169 138
170/* 139/*
171 * r300,r350,rv350,rv380 140 * r300,r350,rv350,rv380
@@ -186,82 +155,6 @@ extern void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v
186extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes); 155extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
187extern int rv370_get_pcie_lanes(struct radeon_device *rdev); 156extern int rv370_get_pcie_lanes(struct radeon_device *rdev);
188 157
189static struct radeon_asic r300_asic = {
190 .init = &r300_init,
191 .fini = &r300_fini,
192 .suspend = &r300_suspend,
193 .resume = &r300_resume,
194 .vga_set_state = &r100_vga_set_state,
195 .gpu_reset = &r300_gpu_reset,
196 .gart_tlb_flush = &r100_pci_gart_tlb_flush,
197 .gart_set_page = &r100_pci_gart_set_page,
198 .cp_commit = &r100_cp_commit,
199 .ring_start = &r300_ring_start,
200 .ring_test = &r100_ring_test,
201 .ring_ib_execute = &r100_ring_ib_execute,
202 .irq_set = &r100_irq_set,
203 .irq_process = &r100_irq_process,
204 .get_vblank_counter = &r100_get_vblank_counter,
205 .fence_ring_emit = &r300_fence_ring_emit,
206 .cs_parse = &r300_cs_parse,
207 .copy_blit = &r100_copy_blit,
208 .copy_dma = &r200_copy_dma,
209 .copy = &r100_copy_blit,
210 .get_engine_clock = &radeon_legacy_get_engine_clock,
211 .set_engine_clock = &radeon_legacy_set_engine_clock,
212 .get_memory_clock = &radeon_legacy_get_memory_clock,
213 .set_memory_clock = NULL,
214 .get_pcie_lanes = &rv370_get_pcie_lanes,
215 .set_pcie_lanes = &rv370_set_pcie_lanes,
216 .set_clock_gating = &radeon_legacy_set_clock_gating,
217 .set_surface_reg = r100_set_surface_reg,
218 .clear_surface_reg = r100_clear_surface_reg,
219 .bandwidth_update = &r100_bandwidth_update,
220 .hpd_init = &r100_hpd_init,
221 .hpd_fini = &r100_hpd_fini,
222 .hpd_sense = &r100_hpd_sense,
223 .hpd_set_polarity = &r100_hpd_set_polarity,
224 .ioctl_wait_idle = NULL,
225};
226
227
228static struct radeon_asic r300_asic_pcie = {
229 .init = &r300_init,
230 .fini = &r300_fini,
231 .suspend = &r300_suspend,
232 .resume = &r300_resume,
233 .vga_set_state = &r100_vga_set_state,
234 .gpu_reset = &r300_gpu_reset,
235 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
236 .gart_set_page = &rv370_pcie_gart_set_page,
237 .cp_commit = &r100_cp_commit,
238 .ring_start = &r300_ring_start,
239 .ring_test = &r100_ring_test,
240 .ring_ib_execute = &r100_ring_ib_execute,
241 .irq_set = &r100_irq_set,
242 .irq_process = &r100_irq_process,
243 .get_vblank_counter = &r100_get_vblank_counter,
244 .fence_ring_emit = &r300_fence_ring_emit,
245 .cs_parse = &r300_cs_parse,
246 .copy_blit = &r100_copy_blit,
247 .copy_dma = &r200_copy_dma,
248 .copy = &r100_copy_blit,
249 .get_engine_clock = &radeon_legacy_get_engine_clock,
250 .set_engine_clock = &radeon_legacy_set_engine_clock,
251 .get_memory_clock = &radeon_legacy_get_memory_clock,
252 .set_memory_clock = NULL,
253 .set_pcie_lanes = &rv370_set_pcie_lanes,
254 .set_clock_gating = &radeon_legacy_set_clock_gating,
255 .set_surface_reg = r100_set_surface_reg,
256 .clear_surface_reg = r100_clear_surface_reg,
257 .bandwidth_update = &r100_bandwidth_update,
258 .hpd_init = &r100_hpd_init,
259 .hpd_fini = &r100_hpd_fini,
260 .hpd_sense = &r100_hpd_sense,
261 .hpd_set_polarity = &r100_hpd_set_polarity,
262 .ioctl_wait_idle = NULL,
263};
264
265/* 158/*
266 * r420,r423,rv410 159 * r420,r423,rv410
267 */ 160 */
@@ -269,44 +162,6 @@ extern int r420_init(struct radeon_device *rdev);
269extern void r420_fini(struct radeon_device *rdev); 162extern void r420_fini(struct radeon_device *rdev);
270extern int r420_suspend(struct radeon_device *rdev); 163extern int r420_suspend(struct radeon_device *rdev);
271extern int r420_resume(struct radeon_device *rdev); 164extern int r420_resume(struct radeon_device *rdev);
272static struct radeon_asic r420_asic = {
273 .init = &r420_init,
274 .fini = &r420_fini,
275 .suspend = &r420_suspend,
276 .resume = &r420_resume,
277 .vga_set_state = &r100_vga_set_state,
278 .gpu_reset = &r300_gpu_reset,
279 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
280 .gart_set_page = &rv370_pcie_gart_set_page,
281 .cp_commit = &r100_cp_commit,
282 .ring_start = &r300_ring_start,
283 .ring_test = &r100_ring_test,
284 .ring_ib_execute = &r100_ring_ib_execute,
285 .irq_set = &r100_irq_set,
286 .irq_process = &r100_irq_process,
287 .get_vblank_counter = &r100_get_vblank_counter,
288 .fence_ring_emit = &r300_fence_ring_emit,
289 .cs_parse = &r300_cs_parse,
290 .copy_blit = &r100_copy_blit,
291 .copy_dma = &r200_copy_dma,
292 .copy = &r100_copy_blit,
293 .get_engine_clock = &radeon_atom_get_engine_clock,
294 .set_engine_clock = &radeon_atom_set_engine_clock,
295 .get_memory_clock = &radeon_atom_get_memory_clock,
296 .set_memory_clock = &radeon_atom_set_memory_clock,
297 .get_pcie_lanes = &rv370_get_pcie_lanes,
298 .set_pcie_lanes = &rv370_set_pcie_lanes,
299 .set_clock_gating = &radeon_atom_set_clock_gating,
300 .set_surface_reg = r100_set_surface_reg,
301 .clear_surface_reg = r100_clear_surface_reg,
302 .bandwidth_update = &r100_bandwidth_update,
303 .hpd_init = &r100_hpd_init,
304 .hpd_fini = &r100_hpd_fini,
305 .hpd_sense = &r100_hpd_sense,
306 .hpd_set_polarity = &r100_hpd_set_polarity,
307 .ioctl_wait_idle = NULL,
308};
309
310 165
311/* 166/*
312 * rs400,rs480 167 * rs400,rs480
@@ -319,44 +174,6 @@ void rs400_gart_tlb_flush(struct radeon_device *rdev);
319int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); 174int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
320uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg); 175uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg);
321void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); 176void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
322static struct radeon_asic rs400_asic = {
323 .init = &rs400_init,
324 .fini = &rs400_fini,
325 .suspend = &rs400_suspend,
326 .resume = &rs400_resume,
327 .vga_set_state = &r100_vga_set_state,
328 .gpu_reset = &r300_gpu_reset,
329 .gart_tlb_flush = &rs400_gart_tlb_flush,
330 .gart_set_page = &rs400_gart_set_page,
331 .cp_commit = &r100_cp_commit,
332 .ring_start = &r300_ring_start,
333 .ring_test = &r100_ring_test,
334 .ring_ib_execute = &r100_ring_ib_execute,
335 .irq_set = &r100_irq_set,
336 .irq_process = &r100_irq_process,
337 .get_vblank_counter = &r100_get_vblank_counter,
338 .fence_ring_emit = &r300_fence_ring_emit,
339 .cs_parse = &r300_cs_parse,
340 .copy_blit = &r100_copy_blit,
341 .copy_dma = &r200_copy_dma,
342 .copy = &r100_copy_blit,
343 .get_engine_clock = &radeon_legacy_get_engine_clock,
344 .set_engine_clock = &radeon_legacy_set_engine_clock,
345 .get_memory_clock = &radeon_legacy_get_memory_clock,
346 .set_memory_clock = NULL,
347 .get_pcie_lanes = NULL,
348 .set_pcie_lanes = NULL,
349 .set_clock_gating = &radeon_legacy_set_clock_gating,
350 .set_surface_reg = r100_set_surface_reg,
351 .clear_surface_reg = r100_clear_surface_reg,
352 .bandwidth_update = &r100_bandwidth_update,
353 .hpd_init = &r100_hpd_init,
354 .hpd_fini = &r100_hpd_fini,
355 .hpd_sense = &r100_hpd_sense,
356 .hpd_set_polarity = &r100_hpd_set_polarity,
357 .ioctl_wait_idle = NULL,
358};
359
360 177
361/* 178/*
362 * rs600. 179 * rs600.
@@ -379,45 +196,6 @@ bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
379void rs600_hpd_set_polarity(struct radeon_device *rdev, 196void rs600_hpd_set_polarity(struct radeon_device *rdev,
380 enum radeon_hpd_id hpd); 197 enum radeon_hpd_id hpd);
381 198
382static struct radeon_asic rs600_asic = {
383 .init = &rs600_init,
384 .fini = &rs600_fini,
385 .suspend = &rs600_suspend,
386 .resume = &rs600_resume,
387 .vga_set_state = &r100_vga_set_state,
388 .gpu_reset = &r300_gpu_reset,
389 .gart_tlb_flush = &rs600_gart_tlb_flush,
390 .gart_set_page = &rs600_gart_set_page,
391 .cp_commit = &r100_cp_commit,
392 .ring_start = &r300_ring_start,
393 .ring_test = &r100_ring_test,
394 .ring_ib_execute = &r100_ring_ib_execute,
395 .irq_set = &rs600_irq_set,
396 .irq_process = &rs600_irq_process,
397 .get_vblank_counter = &rs600_get_vblank_counter,
398 .fence_ring_emit = &r300_fence_ring_emit,
399 .cs_parse = &r300_cs_parse,
400 .copy_blit = &r100_copy_blit,
401 .copy_dma = &r200_copy_dma,
402 .copy = &r100_copy_blit,
403 .get_engine_clock = &radeon_atom_get_engine_clock,
404 .set_engine_clock = &radeon_atom_set_engine_clock,
405 .get_memory_clock = &radeon_atom_get_memory_clock,
406 .set_memory_clock = &radeon_atom_set_memory_clock,
407 .get_pcie_lanes = NULL,
408 .set_pcie_lanes = NULL,
409 .set_clock_gating = &radeon_atom_set_clock_gating,
410 .set_surface_reg = r100_set_surface_reg,
411 .clear_surface_reg = r100_clear_surface_reg,
412 .bandwidth_update = &rs600_bandwidth_update,
413 .hpd_init = &rs600_hpd_init,
414 .hpd_fini = &rs600_hpd_fini,
415 .hpd_sense = &rs600_hpd_sense,
416 .hpd_set_polarity = &rs600_hpd_set_polarity,
417 .ioctl_wait_idle = NULL,
418};
419
420
421/* 199/*
422 * rs690,rs740 200 * rs690,rs740
423 */ 201 */
@@ -428,44 +206,6 @@ int rs690_suspend(struct radeon_device *rdev);
428uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg); 206uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg);
429void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); 207void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
430void rs690_bandwidth_update(struct radeon_device *rdev); 208void rs690_bandwidth_update(struct radeon_device *rdev);
431static struct radeon_asic rs690_asic = {
432 .init = &rs690_init,
433 .fini = &rs690_fini,
434 .suspend = &rs690_suspend,
435 .resume = &rs690_resume,
436 .vga_set_state = &r100_vga_set_state,
437 .gpu_reset = &r300_gpu_reset,
438 .gart_tlb_flush = &rs400_gart_tlb_flush,
439 .gart_set_page = &rs400_gart_set_page,
440 .cp_commit = &r100_cp_commit,
441 .ring_start = &r300_ring_start,
442 .ring_test = &r100_ring_test,
443 .ring_ib_execute = &r100_ring_ib_execute,
444 .irq_set = &rs600_irq_set,
445 .irq_process = &rs600_irq_process,
446 .get_vblank_counter = &rs600_get_vblank_counter,
447 .fence_ring_emit = &r300_fence_ring_emit,
448 .cs_parse = &r300_cs_parse,
449 .copy_blit = &r100_copy_blit,
450 .copy_dma = &r200_copy_dma,
451 .copy = &r200_copy_dma,
452 .get_engine_clock = &radeon_atom_get_engine_clock,
453 .set_engine_clock = &radeon_atom_set_engine_clock,
454 .get_memory_clock = &radeon_atom_get_memory_clock,
455 .set_memory_clock = &radeon_atom_set_memory_clock,
456 .get_pcie_lanes = NULL,
457 .set_pcie_lanes = NULL,
458 .set_clock_gating = &radeon_atom_set_clock_gating,
459 .set_surface_reg = r100_set_surface_reg,
460 .clear_surface_reg = r100_clear_surface_reg,
461 .bandwidth_update = &rs690_bandwidth_update,
462 .hpd_init = &rs600_hpd_init,
463 .hpd_fini = &rs600_hpd_fini,
464 .hpd_sense = &rs600_hpd_sense,
465 .hpd_set_polarity = &rs600_hpd_set_polarity,
466 .ioctl_wait_idle = NULL,
467};
468
469 209
470/* 210/*
471 * rv515 211 * rv515
@@ -481,87 +221,12 @@ void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
481void rv515_bandwidth_update(struct radeon_device *rdev); 221void rv515_bandwidth_update(struct radeon_device *rdev);
482int rv515_resume(struct radeon_device *rdev); 222int rv515_resume(struct radeon_device *rdev);
483int rv515_suspend(struct radeon_device *rdev); 223int rv515_suspend(struct radeon_device *rdev);
484static struct radeon_asic rv515_asic = {
485 .init = &rv515_init,
486 .fini = &rv515_fini,
487 .suspend = &rv515_suspend,
488 .resume = &rv515_resume,
489 .vga_set_state = &r100_vga_set_state,
490 .gpu_reset = &rv515_gpu_reset,
491 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
492 .gart_set_page = &rv370_pcie_gart_set_page,
493 .cp_commit = &r100_cp_commit,
494 .ring_start = &rv515_ring_start,
495 .ring_test = &r100_ring_test,
496 .ring_ib_execute = &r100_ring_ib_execute,
497 .irq_set = &rs600_irq_set,
498 .irq_process = &rs600_irq_process,
499 .get_vblank_counter = &rs600_get_vblank_counter,
500 .fence_ring_emit = &r300_fence_ring_emit,
501 .cs_parse = &r300_cs_parse,
502 .copy_blit = &r100_copy_blit,
503 .copy_dma = &r200_copy_dma,
504 .copy = &r100_copy_blit,
505 .get_engine_clock = &radeon_atom_get_engine_clock,
506 .set_engine_clock = &radeon_atom_set_engine_clock,
507 .get_memory_clock = &radeon_atom_get_memory_clock,
508 .set_memory_clock = &radeon_atom_set_memory_clock,
509 .get_pcie_lanes = &rv370_get_pcie_lanes,
510 .set_pcie_lanes = &rv370_set_pcie_lanes,
511 .set_clock_gating = &radeon_atom_set_clock_gating,
512 .set_surface_reg = r100_set_surface_reg,
513 .clear_surface_reg = r100_clear_surface_reg,
514 .bandwidth_update = &rv515_bandwidth_update,
515 .hpd_init = &rs600_hpd_init,
516 .hpd_fini = &rs600_hpd_fini,
517 .hpd_sense = &rs600_hpd_sense,
518 .hpd_set_polarity = &rs600_hpd_set_polarity,
519 .ioctl_wait_idle = NULL,
520};
521
522 224
523/* 225/*
524 * r520,rv530,rv560,rv570,r580 226 * r520,rv530,rv560,rv570,r580
525 */ 227 */
526int r520_init(struct radeon_device *rdev); 228int r520_init(struct radeon_device *rdev);
527int r520_resume(struct radeon_device *rdev); 229int r520_resume(struct radeon_device *rdev);
528static struct radeon_asic r520_asic = {
529 .init = &r520_init,
530 .fini = &rv515_fini,
531 .suspend = &rv515_suspend,
532 .resume = &r520_resume,
533 .vga_set_state = &r100_vga_set_state,
534 .gpu_reset = &rv515_gpu_reset,
535 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
536 .gart_set_page = &rv370_pcie_gart_set_page,
537 .cp_commit = &r100_cp_commit,
538 .ring_start = &rv515_ring_start,
539 .ring_test = &r100_ring_test,
540 .ring_ib_execute = &r100_ring_ib_execute,
541 .irq_set = &rs600_irq_set,
542 .irq_process = &rs600_irq_process,
543 .get_vblank_counter = &rs600_get_vblank_counter,
544 .fence_ring_emit = &r300_fence_ring_emit,
545 .cs_parse = &r300_cs_parse,
546 .copy_blit = &r100_copy_blit,
547 .copy_dma = &r200_copy_dma,
548 .copy = &r100_copy_blit,
549 .get_engine_clock = &radeon_atom_get_engine_clock,
550 .set_engine_clock = &radeon_atom_set_engine_clock,
551 .get_memory_clock = &radeon_atom_get_memory_clock,
552 .set_memory_clock = &radeon_atom_set_memory_clock,
553 .get_pcie_lanes = &rv370_get_pcie_lanes,
554 .set_pcie_lanes = &rv370_set_pcie_lanes,
555 .set_clock_gating = &radeon_atom_set_clock_gating,
556 .set_surface_reg = r100_set_surface_reg,
557 .clear_surface_reg = r100_clear_surface_reg,
558 .bandwidth_update = &rv515_bandwidth_update,
559 .hpd_init = &rs600_hpd_init,
560 .hpd_fini = &rs600_hpd_fini,
561 .hpd_sense = &rs600_hpd_sense,
562 .hpd_set_polarity = &rs600_hpd_set_polarity,
563 .ioctl_wait_idle = NULL,
564};
565 230
566/* 231/*
567 * r600,rv610,rv630,rv620,rv635,rv670,rs780,rs880 232 * r600,rv610,rv630,rv620,rv635,rv670,rs780,rs880
@@ -591,7 +256,7 @@ int r600_gpu_reset(struct radeon_device *rdev);
591int r600_set_surface_reg(struct radeon_device *rdev, int reg, 256int r600_set_surface_reg(struct radeon_device *rdev, int reg,
592 uint32_t tiling_flags, uint32_t pitch, 257 uint32_t tiling_flags, uint32_t pitch,
593 uint32_t offset, uint32_t obj_size); 258 uint32_t offset, uint32_t obj_size);
594int r600_clear_surface_reg(struct radeon_device *rdev, int reg); 259void r600_clear_surface_reg(struct radeon_device *rdev, int reg);
595void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); 260void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
596int r600_ring_test(struct radeon_device *rdev); 261int r600_ring_test(struct radeon_device *rdev);
597int r600_copy_blit(struct radeon_device *rdev, 262int r600_copy_blit(struct radeon_device *rdev,
@@ -604,43 +269,6 @@ void r600_hpd_set_polarity(struct radeon_device *rdev,
604 enum radeon_hpd_id hpd); 269 enum radeon_hpd_id hpd);
605extern void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo); 270extern void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo);
606 271
607static struct radeon_asic r600_asic = {
608 .init = &r600_init,
609 .fini = &r600_fini,
610 .suspend = &r600_suspend,
611 .resume = &r600_resume,
612 .cp_commit = &r600_cp_commit,
613 .vga_set_state = &r600_vga_set_state,
614 .gpu_reset = &r600_gpu_reset,
615 .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
616 .gart_set_page = &rs600_gart_set_page,
617 .ring_test = &r600_ring_test,
618 .ring_ib_execute = &r600_ring_ib_execute,
619 .irq_set = &r600_irq_set,
620 .irq_process = &r600_irq_process,
621 .get_vblank_counter = &rs600_get_vblank_counter,
622 .fence_ring_emit = &r600_fence_ring_emit,
623 .cs_parse = &r600_cs_parse,
624 .copy_blit = &r600_copy_blit,
625 .copy_dma = &r600_copy_blit,
626 .copy = &r600_copy_blit,
627 .get_engine_clock = &radeon_atom_get_engine_clock,
628 .set_engine_clock = &radeon_atom_set_engine_clock,
629 .get_memory_clock = &radeon_atom_get_memory_clock,
630 .set_memory_clock = &radeon_atom_set_memory_clock,
631 .get_pcie_lanes = &rv370_get_pcie_lanes,
632 .set_pcie_lanes = NULL,
633 .set_clock_gating = NULL,
634 .set_surface_reg = r600_set_surface_reg,
635 .clear_surface_reg = r600_clear_surface_reg,
636 .bandwidth_update = &rv515_bandwidth_update,
637 .hpd_init = &r600_hpd_init,
638 .hpd_fini = &r600_hpd_fini,
639 .hpd_sense = &r600_hpd_sense,
640 .hpd_set_polarity = &r600_hpd_set_polarity,
641 .ioctl_wait_idle = r600_ioctl_wait_idle,
642};
643
644/* 272/*
645 * rv770,rv730,rv710,rv740 273 * rv770,rv730,rv710,rv740
646 */ 274 */
@@ -650,43 +278,6 @@ int rv770_suspend(struct radeon_device *rdev);
650int rv770_resume(struct radeon_device *rdev); 278int rv770_resume(struct radeon_device *rdev);
651int rv770_gpu_reset(struct radeon_device *rdev); 279int rv770_gpu_reset(struct radeon_device *rdev);
652 280
653static struct radeon_asic rv770_asic = {
654 .init = &rv770_init,
655 .fini = &rv770_fini,
656 .suspend = &rv770_suspend,
657 .resume = &rv770_resume,
658 .cp_commit = &r600_cp_commit,
659 .gpu_reset = &rv770_gpu_reset,
660 .vga_set_state = &r600_vga_set_state,
661 .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
662 .gart_set_page = &rs600_gart_set_page,
663 .ring_test = &r600_ring_test,
664 .ring_ib_execute = &r600_ring_ib_execute,
665 .irq_set = &r600_irq_set,
666 .irq_process = &r600_irq_process,
667 .get_vblank_counter = &rs600_get_vblank_counter,
668 .fence_ring_emit = &r600_fence_ring_emit,
669 .cs_parse = &r600_cs_parse,
670 .copy_blit = &r600_copy_blit,
671 .copy_dma = &r600_copy_blit,
672 .copy = &r600_copy_blit,
673 .get_engine_clock = &radeon_atom_get_engine_clock,
674 .set_engine_clock = &radeon_atom_set_engine_clock,
675 .get_memory_clock = &radeon_atom_get_memory_clock,
676 .set_memory_clock = &radeon_atom_set_memory_clock,
677 .get_pcie_lanes = &rv370_get_pcie_lanes,
678 .set_pcie_lanes = NULL,
679 .set_clock_gating = &radeon_atom_set_clock_gating,
680 .set_surface_reg = r600_set_surface_reg,
681 .clear_surface_reg = r600_clear_surface_reg,
682 .bandwidth_update = &rv515_bandwidth_update,
683 .hpd_init = &r600_hpd_init,
684 .hpd_fini = &r600_hpd_fini,
685 .hpd_sense = &r600_hpd_sense,
686 .hpd_set_polarity = &r600_hpd_set_polarity,
687 .ioctl_wait_idle = r600_ioctl_wait_idle,
688};
689
690/* 281/*
691 * evergreen 282 * evergreen
692 */ 283 */
@@ -701,40 +292,4 @@ void evergreen_hpd_fini(struct radeon_device *rdev);
701bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); 292bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
702void evergreen_hpd_set_polarity(struct radeon_device *rdev, 293void evergreen_hpd_set_polarity(struct radeon_device *rdev,
703 enum radeon_hpd_id hpd); 294 enum radeon_hpd_id hpd);
704
705static struct radeon_asic evergreen_asic = {
706 .init = &evergreen_init,
707 .fini = &evergreen_fini,
708 .suspend = &evergreen_suspend,
709 .resume = &evergreen_resume,
710 .cp_commit = NULL,
711 .gpu_reset = &evergreen_gpu_reset,
712 .vga_set_state = &r600_vga_set_state,
713 .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
714 .gart_set_page = &rs600_gart_set_page,
715 .ring_test = NULL,
716 .ring_ib_execute = NULL,
717 .irq_set = NULL,
718 .irq_process = NULL,
719 .get_vblank_counter = NULL,
720 .fence_ring_emit = NULL,
721 .cs_parse = NULL,
722 .copy_blit = NULL,
723 .copy_dma = NULL,
724 .copy = NULL,
725 .get_engine_clock = &radeon_atom_get_engine_clock,
726 .set_engine_clock = &radeon_atom_set_engine_clock,
727 .get_memory_clock = &radeon_atom_get_memory_clock,
728 .set_memory_clock = &radeon_atom_set_memory_clock,
729 .set_pcie_lanes = NULL,
730 .set_clock_gating = NULL,
731 .set_surface_reg = r600_set_surface_reg,
732 .clear_surface_reg = r600_clear_surface_reg,
733 .bandwidth_update = &evergreen_bandwidth_update,
734 .hpd_init = &evergreen_hpd_init,
735 .hpd_fini = &evergreen_hpd_fini,
736 .hpd_sense = &evergreen_hpd_sense,
737 .hpd_set_polarity = &evergreen_hpd_set_polarity,
738};
739
740#endif 295#endif
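
The hunks above strip the per-family `static struct radeon_asic ...` initializers out of radeon_asic.h, leaving only the chip-specific function prototypes in the header; further down, the radeon_device.c hunks drop the matching `radeon_register_accessor_init()` / `radeon_asic_init()` switch, so the tables are presumably consolidated into a single dedicated translation unit (not shown in this section) instead of being re-instantiated in every file that includes the header. What these tables implement is a plain function-pointer dispatch: each chip family gets one ops table and common code calls through it. Below is a minimal, self-contained sketch of that pattern; the struct, field and function names are invented for the example and are not the driver's real ones.

#include <stdio.h>

/* Hypothetical, simplified stand-in for the driver's per-ASIC ops table. */
struct asic_ops {
	int  (*init)(void);
	void (*bandwidth_update)(void);
};

static int  r100_init(void)             { puts("r100 init"); return 0; }
static void r100_bandwidth_update(void) { puts("r100 bandwidth"); }
static int  r600_init(void)             { puts("r600 init"); return 0; }
static void r600_bandwidth_update(void) { puts("r600 bandwidth"); }

/* One table per chip family; the device keeps a pointer to the table
 * that matches its chip, and common code only ever calls through it. */
static const struct asic_ops r100_ops = {
	.init             = r100_init,
	.bandwidth_update = r100_bandwidth_update,
};
static const struct asic_ops r600_ops = {
	.init             = r600_init,
	.bandwidth_update = r600_bandwidth_update,
};

int main(void)
{
	const struct asic_ops *asic = &r600_ops;  /* chosen once at probe time */

	if (asic->init() == 0)
		asic->bandwidth_update();
	return 0;
}
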
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 93783b15c81d..9916d825401c 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -69,52 +69,54 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev
69 struct radeon_i2c_bus_rec i2c; 69 struct radeon_i2c_bus_rec i2c;
70 int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info); 70 int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info);
71 struct _ATOM_GPIO_I2C_INFO *i2c_info; 71 struct _ATOM_GPIO_I2C_INFO *i2c_info;
72 uint16_t data_offset; 72 uint16_t data_offset, size;
73 int i; 73 int i, num_indices;
74 74
75 memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec)); 75 memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec));
76 i2c.valid = false; 76 i2c.valid = false;
77 77
78 atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset); 78 if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
79 79 i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);
80 i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset); 80
81 81 num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
82 82 sizeof(ATOM_GPIO_I2C_ASSIGMENT);
83 for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) { 83
84 gpio = &i2c_info->asGPIO_Info[i]; 84 for (i = 0; i < num_indices; i++) {
85 85 gpio = &i2c_info->asGPIO_Info[i];
86 if (gpio->sucI2cId.ucAccess == id) { 86
87 i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; 87 if (gpio->sucI2cId.ucAccess == id) {
88 i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4; 88 i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
89 i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4; 89 i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
90 i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4; 90 i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
91 i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4; 91 i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
92 i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4; 92 i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
93 i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4; 93 i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
94 i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4; 94 i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
95 i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift); 95 i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
96 i2c.mask_data_mask = (1 << gpio->ucDataMaskShift); 96 i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
97 i2c.en_clk_mask = (1 << gpio->ucClkEnShift); 97 i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
98 i2c.en_data_mask = (1 << gpio->ucDataEnShift); 98 i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
99 i2c.y_clk_mask = (1 << gpio->ucClkY_Shift); 99 i2c.en_data_mask = (1 << gpio->ucDataEnShift);
100 i2c.y_data_mask = (1 << gpio->ucDataY_Shift); 100 i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
101 i2c.a_clk_mask = (1 << gpio->ucClkA_Shift); 101 i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
102 i2c.a_data_mask = (1 << gpio->ucDataA_Shift); 102 i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
103 103 i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
104 if (gpio->sucI2cId.sbfAccess.bfHW_Capable) 104
105 i2c.hw_capable = true; 105 if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
106 else 106 i2c.hw_capable = true;
107 i2c.hw_capable = false; 107 else
108 108 i2c.hw_capable = false;
109 if (gpio->sucI2cId.ucAccess == 0xa0) 109
110 i2c.mm_i2c = true; 110 if (gpio->sucI2cId.ucAccess == 0xa0)
111 else 111 i2c.mm_i2c = true;
112 i2c.mm_i2c = false; 112 else
113 113 i2c.mm_i2c = false;
114 i2c.i2c_id = gpio->sucI2cId.ucAccess; 114
115 115 i2c.i2c_id = gpio->sucI2cId.ucAccess;
116 i2c.valid = true; 116
117 break; 117 i2c.valid = true;
118 break;
119 }
118 } 120 }
119 } 121 }
120 122
@@ -135,20 +137,21 @@ static inline struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rd
135 memset(&gpio, 0, sizeof(struct radeon_gpio_rec)); 137 memset(&gpio, 0, sizeof(struct radeon_gpio_rec));
136 gpio.valid = false; 138 gpio.valid = false;
137 139
138 atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset); 140 if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
141 gpio_info = (struct _ATOM_GPIO_PIN_LUT *)(ctx->bios + data_offset);
139 142
140 gpio_info = (struct _ATOM_GPIO_PIN_LUT *)(ctx->bios + data_offset); 143 num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
144 sizeof(ATOM_GPIO_PIN_ASSIGNMENT);
141 145
142 num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / sizeof(ATOM_GPIO_PIN_ASSIGNMENT); 146 for (i = 0; i < num_indices; i++) {
143 147 pin = &gpio_info->asGPIO_Pin[i];
144 for (i = 0; i < num_indices; i++) { 148 if (id == pin->ucGPIO_ID) {
145 pin = &gpio_info->asGPIO_Pin[i]; 149 gpio.id = pin->ucGPIO_ID;
146 if (id == pin->ucGPIO_ID) { 150 gpio.reg = pin->usGpioPin_AIndex * 4;
147 gpio.id = pin->ucGPIO_ID; 151 gpio.mask = (1 << pin->ucGpioPinBitShift);
148 gpio.reg = pin->usGpioPin_AIndex * 4; 152 gpio.valid = true;
149 gpio.mask = (1 << pin->ucGpioPinBitShift); 153 break;
150 gpio.valid = true; 154 }
151 break;
152 } 155 }
153 } 156 }
154 157
@@ -264,6 +267,8 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
264 if ((supported_device == ATOM_DEVICE_CRT1_SUPPORT) || 267 if ((supported_device == ATOM_DEVICE_CRT1_SUPPORT) ||
265 (supported_device == ATOM_DEVICE_DFP2_SUPPORT)) 268 (supported_device == ATOM_DEVICE_DFP2_SUPPORT))
266 return false; 269 return false;
270 if (supported_device == ATOM_DEVICE_CRT2_SUPPORT)
271 *line_mux = 0x90;
267 } 272 }
268 273
269 /* ASUS HD 3600 XT board lists the DVI port as HDMI */ 274 /* ASUS HD 3600 XT board lists the DVI port as HDMI */
@@ -395,9 +400,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
395 struct radeon_gpio_rec gpio; 400 struct radeon_gpio_rec gpio;
396 struct radeon_hpd hpd; 401 struct radeon_hpd hpd;
397 402
398 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset); 403 if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
399
400 if (data_offset == 0)
401 return false; 404 return false;
402 405
403 if (crev < 2) 406 if (crev < 2)
@@ -449,37 +452,43 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
449 GetIndexIntoMasterTable(DATA, 452 GetIndexIntoMasterTable(DATA,
450 IntegratedSystemInfo); 453 IntegratedSystemInfo);
451 454
452 atom_parse_data_header(ctx, index, &size, &frev, 455 if (atom_parse_data_header(ctx, index, &size, &frev,
453 &crev, &igp_offset); 456 &crev, &igp_offset)) {
454 457
455 if (crev >= 2) { 458 if (crev >= 2) {
456 igp_obj = 459 igp_obj =
457 (ATOM_INTEGRATED_SYSTEM_INFO_V2 460 (ATOM_INTEGRATED_SYSTEM_INFO_V2
458 *) (ctx->bios + igp_offset); 461 *) (ctx->bios + igp_offset);
459 462
460 if (igp_obj) { 463 if (igp_obj) {
461 uint32_t slot_config, ct; 464 uint32_t slot_config, ct;
462 465
463 if (con_obj_num == 1) 466 if (con_obj_num == 1)
464 slot_config = 467 slot_config =
465 igp_obj-> 468 igp_obj->
466 ulDDISlot1Config; 469 ulDDISlot1Config;
467 else 470 else
468 slot_config = 471 slot_config =
469 igp_obj-> 472 igp_obj->
470 ulDDISlot2Config; 473 ulDDISlot2Config;
471 474
472 ct = (slot_config >> 16) & 0xff; 475 ct = (slot_config >> 16) & 0xff;
473 connector_type = 476 connector_type =
474 object_connector_convert 477 object_connector_convert
475 [ct]; 478 [ct];
476 connector_object_id = ct; 479 connector_object_id = ct;
477 igp_lane_info = 480 igp_lane_info =
478 slot_config & 0xffff; 481 slot_config & 0xffff;
482 } else
483 continue;
479 } else 484 } else
480 continue; 485 continue;
481 } else 486 } else {
482 continue; 487 igp_lane_info = 0;
488 connector_type =
489 object_connector_convert[con_obj_id];
490 connector_object_id = con_obj_id;
491 }
483 } else { 492 } else {
484 igp_lane_info = 0; 493 igp_lane_info = 0;
485 connector_type = 494 connector_type =
@@ -627,20 +636,23 @@ static uint16_t atombios_get_connector_object_id(struct drm_device *dev,
627 uint8_t frev, crev; 636 uint8_t frev, crev;
628 ATOM_XTMDS_INFO *xtmds; 637 ATOM_XTMDS_INFO *xtmds;
629 638
630 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset); 639 if (atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset)) {
631 xtmds = (ATOM_XTMDS_INFO *)(ctx->bios + data_offset); 640 xtmds = (ATOM_XTMDS_INFO *)(ctx->bios + data_offset);
632 641
633 if (xtmds->ucSupportedLink & ATOM_XTMDS_SUPPORTED_DUALLINK) { 642 if (xtmds->ucSupportedLink & ATOM_XTMDS_SUPPORTED_DUALLINK) {
634 if (connector_type == DRM_MODE_CONNECTOR_DVII) 643 if (connector_type == DRM_MODE_CONNECTOR_DVII)
635 return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I; 644 return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I;
636 else 645 else
637 return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D; 646 return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D;
638 } else { 647 } else {
639 if (connector_type == DRM_MODE_CONNECTOR_DVII) 648 if (connector_type == DRM_MODE_CONNECTOR_DVII)
640 return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I; 649 return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I;
641 else 650 else
642 return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D; 651 return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D;
643 } 652 }
653 } else
654 return supported_devices_connector_object_id_convert
655 [connector_type];
644 } else { 656 } else {
645 return supported_devices_connector_object_id_convert 657 return supported_devices_connector_object_id_convert
646 [connector_type]; 658 [connector_type];
@@ -672,7 +684,8 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
672 int i, j, max_device; 684 int i, j, max_device;
673 struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE]; 685 struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
674 686
675 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset); 687 if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
688 return false;
676 689
677 supported_devices = 690 supported_devices =
678 (union atom_supported_devices *)(ctx->bios + data_offset); 691 (union atom_supported_devices *)(ctx->bios + data_offset);
@@ -865,14 +878,11 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
865 struct radeon_pll *mpll = &rdev->clock.mpll; 878 struct radeon_pll *mpll = &rdev->clock.mpll;
866 uint16_t data_offset; 879 uint16_t data_offset;
867 880
868 atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, 881 if (atom_parse_data_header(mode_info->atom_context, index, NULL,
869 &crev, &data_offset); 882 &frev, &crev, &data_offset)) {
870 883 firmware_info =
871 firmware_info = 884 (union firmware_info *)(mode_info->atom_context->bios +
872 (union firmware_info *)(mode_info->atom_context->bios + 885 data_offset);
873 data_offset);
874
875 if (firmware_info) {
876 /* pixel clocks */ 886 /* pixel clocks */
877 p1pll->reference_freq = 887 p1pll->reference_freq =
878 le16_to_cpu(firmware_info->info.usReferenceClock); 888 le16_to_cpu(firmware_info->info.usReferenceClock);
@@ -887,6 +897,20 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
887 p1pll->pll_out_max = 897 p1pll->pll_out_max =
888 le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output); 898 le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output);
889 899
900 if (crev >= 4) {
901 p1pll->lcd_pll_out_min =
902 le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100;
903 if (p1pll->lcd_pll_out_min == 0)
904 p1pll->lcd_pll_out_min = p1pll->pll_out_min;
905 p1pll->lcd_pll_out_max =
906 le16_to_cpu(firmware_info->info_14.usLcdMaxPixelClockPLL_Output) * 100;
907 if (p1pll->lcd_pll_out_max == 0)
908 p1pll->lcd_pll_out_max = p1pll->pll_out_max;
909 } else {
910 p1pll->lcd_pll_out_min = p1pll->pll_out_min;
911 p1pll->lcd_pll_out_max = p1pll->pll_out_max;
912 }
913
890 if (p1pll->pll_out_min == 0) { 914 if (p1pll->pll_out_min == 0) {
891 if (ASIC_IS_AVIVO(rdev)) 915 if (ASIC_IS_AVIVO(rdev))
892 p1pll->pll_out_min = 64800; 916 p1pll->pll_out_min = 64800;
@@ -992,13 +1016,10 @@ bool radeon_atombios_sideport_present(struct radeon_device *rdev)
992 u8 frev, crev; 1016 u8 frev, crev;
993 u16 data_offset; 1017 u16 data_offset;
994 1018
995 atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, 1019 if (atom_parse_data_header(mode_info->atom_context, index, NULL,
996 &crev, &data_offset); 1020 &frev, &crev, &data_offset)) {
997 1021 igp_info = (union igp_info *)(mode_info->atom_context->bios +
998 igp_info = (union igp_info *)(mode_info->atom_context->bios +
999 data_offset); 1022 data_offset);
1000
1001 if (igp_info) {
1002 switch (crev) { 1023 switch (crev) {
1003 case 1: 1024 case 1:
1004 if (igp_info->info.ucMemoryType & 0xf0) 1025 if (igp_info->info.ucMemoryType & 0xf0)
@@ -1029,14 +1050,12 @@ bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder,
1029 uint16_t maxfreq; 1050 uint16_t maxfreq;
1030 int i; 1051 int i;
1031 1052
1032 atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, 1053 if (atom_parse_data_header(mode_info->atom_context, index, NULL,
1033 &crev, &data_offset); 1054 &frev, &crev, &data_offset)) {
1055 tmds_info =
1056 (struct _ATOM_TMDS_INFO *)(mode_info->atom_context->bios +
1057 data_offset);
1034 1058
1035 tmds_info =
1036 (struct _ATOM_TMDS_INFO *)(mode_info->atom_context->bios +
1037 data_offset);
1038
1039 if (tmds_info) {
1040 maxfreq = le16_to_cpu(tmds_info->usMaxFrequency); 1059 maxfreq = le16_to_cpu(tmds_info->usMaxFrequency);
1041 for (i = 0; i < 4; i++) { 1060 for (i = 0; i < 4; i++) {
1042 tmds->tmds_pll[i].freq = 1061 tmds->tmds_pll[i].freq =
@@ -1085,13 +1104,11 @@ static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct
1085 if (id > ATOM_MAX_SS_ENTRY) 1104 if (id > ATOM_MAX_SS_ENTRY)
1086 return NULL; 1105 return NULL;
1087 1106
1088 atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, 1107 if (atom_parse_data_header(mode_info->atom_context, index, NULL,
1089 &crev, &data_offset); 1108 &frev, &crev, &data_offset)) {
1090 1109 ss_info =
1091 ss_info = 1110 (struct _ATOM_SPREAD_SPECTRUM_INFO *)(mode_info->atom_context->bios + data_offset);
1092 (struct _ATOM_SPREAD_SPECTRUM_INFO *)(mode_info->atom_context->bios + data_offset);
1093 1111
1094 if (ss_info) {
1095 ss = 1112 ss =
1096 kzalloc(sizeof(struct radeon_atom_ss), GFP_KERNEL); 1113 kzalloc(sizeof(struct radeon_atom_ss), GFP_KERNEL);
1097 1114
@@ -1114,30 +1131,6 @@ static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct
1114 return ss; 1131 return ss;
1115} 1132}
1116 1133
1117static void radeon_atom_apply_lvds_quirks(struct drm_device *dev,
1118 struct radeon_encoder_atom_dig *lvds)
1119{
1120
1121 /* Toshiba A300-1BU laptop panel doesn't like new pll divider algo */
1122 if ((dev->pdev->device == 0x95c4) &&
1123 (dev->pdev->subsystem_vendor == 0x1179) &&
1124 (dev->pdev->subsystem_device == 0xff50)) {
1125 if ((lvds->native_mode.hdisplay == 1280) &&
1126 (lvds->native_mode.vdisplay == 800))
1127 lvds->pll_algo = PLL_ALGO_LEGACY;
1128 }
1129
1130 /* Dell Studio 15 laptop panel doesn't like new pll divider algo */
1131 if ((dev->pdev->device == 0x95c4) &&
1132 (dev->pdev->subsystem_vendor == 0x1028) &&
1133 (dev->pdev->subsystem_device == 0x029f)) {
1134 if ((lvds->native_mode.hdisplay == 1280) &&
1135 (lvds->native_mode.vdisplay == 800))
1136 lvds->pll_algo = PLL_ALGO_LEGACY;
1137 }
1138
1139}
1140
1141union lvds_info { 1134union lvds_info {
1142 struct _ATOM_LVDS_INFO info; 1135 struct _ATOM_LVDS_INFO info;
1143 struct _ATOM_LVDS_INFO_V12 info_12; 1136 struct _ATOM_LVDS_INFO_V12 info_12;
@@ -1156,13 +1149,10 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
1156 uint8_t frev, crev; 1149 uint8_t frev, crev;
1157 struct radeon_encoder_atom_dig *lvds = NULL; 1150 struct radeon_encoder_atom_dig *lvds = NULL;
1158 1151
1159 atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, 1152 if (atom_parse_data_header(mode_info->atom_context, index, NULL,
1160 &crev, &data_offset); 1153 &frev, &crev, &data_offset)) {
1161 1154 lvds_info =
1162 lvds_info = 1155 (union lvds_info *)(mode_info->atom_context->bios + data_offset);
1163 (union lvds_info *)(mode_info->atom_context->bios + data_offset);
1164
1165 if (lvds_info) {
1166 lvds = 1156 lvds =
1167 kzalloc(sizeof(struct radeon_encoder_atom_dig), GFP_KERNEL); 1157 kzalloc(sizeof(struct radeon_encoder_atom_dig), GFP_KERNEL);
1168 1158
@@ -1220,9 +1210,6 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
1220 lvds->pll_algo = PLL_ALGO_LEGACY; 1210 lvds->pll_algo = PLL_ALGO_LEGACY;
1221 } 1211 }
1222 1212
1223 /* LVDS quirks */
1224 radeon_atom_apply_lvds_quirks(dev, lvds);
1225
1226 encoder->native_mode = lvds->native_mode; 1213 encoder->native_mode = lvds->native_mode;
1227 } 1214 }
1228 return lvds; 1215 return lvds;
@@ -1241,11 +1228,11 @@ radeon_atombios_get_primary_dac_info(struct radeon_encoder *encoder)
1241 uint8_t bg, dac; 1228 uint8_t bg, dac;
1242 struct radeon_encoder_primary_dac *p_dac = NULL; 1229 struct radeon_encoder_primary_dac *p_dac = NULL;
1243 1230
1244 atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset); 1231 if (atom_parse_data_header(mode_info->atom_context, index, NULL,
1245 1232 &frev, &crev, &data_offset)) {
1246 dac_info = (struct _COMPASSIONATE_DATA *)(mode_info->atom_context->bios + data_offset); 1233 dac_info = (struct _COMPASSIONATE_DATA *)
1234 (mode_info->atom_context->bios + data_offset);
1247 1235
1248 if (dac_info) {
1249 p_dac = kzalloc(sizeof(struct radeon_encoder_primary_dac), GFP_KERNEL); 1236 p_dac = kzalloc(sizeof(struct radeon_encoder_primary_dac), GFP_KERNEL);
1250 1237
1251 if (!p_dac) 1238 if (!p_dac)
@@ -1270,12 +1257,14 @@ bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
1270 u8 frev, crev; 1257 u8 frev, crev;
1271 u16 data_offset, misc; 1258 u16 data_offset, misc;
1272 1259
1273 atom_parse_data_header(mode_info->atom_context, data_index, NULL, &frev, &crev, &data_offset); 1260 if (!atom_parse_data_header(mode_info->atom_context, data_index, NULL,
1261 &frev, &crev, &data_offset))
1262 return false;
1274 1263
1275 switch (crev) { 1264 switch (crev) {
1276 case 1: 1265 case 1:
1277 tv_info = (ATOM_ANALOG_TV_INFO *)(mode_info->atom_context->bios + data_offset); 1266 tv_info = (ATOM_ANALOG_TV_INFO *)(mode_info->atom_context->bios + data_offset);
1278 if (index > MAX_SUPPORTED_TV_TIMING) 1267 if (index >= MAX_SUPPORTED_TV_TIMING)
1279 return false; 1268 return false;
1280 1269
1281 mode->crtc_htotal = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_Total); 1270 mode->crtc_htotal = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_Total);
@@ -1313,7 +1302,7 @@ bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
1313 break; 1302 break;
1314 case 2: 1303 case 2:
1315 tv_info_v1_2 = (ATOM_ANALOG_TV_INFO_V1_2 *)(mode_info->atom_context->bios + data_offset); 1304 tv_info_v1_2 = (ATOM_ANALOG_TV_INFO_V1_2 *)(mode_info->atom_context->bios + data_offset);
1316 if (index > MAX_SUPPORTED_TV_TIMING_V1_2) 1305 if (index >= MAX_SUPPORTED_TV_TIMING_V1_2)
1317 return false; 1306 return false;
1318 1307
1319 dtd_timings = &tv_info_v1_2->aModeTimings[index]; 1308 dtd_timings = &tv_info_v1_2->aModeTimings[index];
@@ -1362,47 +1351,50 @@ radeon_atombios_get_tv_info(struct radeon_device *rdev)
1362 struct _ATOM_ANALOG_TV_INFO *tv_info; 1351 struct _ATOM_ANALOG_TV_INFO *tv_info;
1363 enum radeon_tv_std tv_std = TV_STD_NTSC; 1352 enum radeon_tv_std tv_std = TV_STD_NTSC;
1364 1353
1365 atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset); 1354 if (atom_parse_data_header(mode_info->atom_context, index, NULL,
1355 &frev, &crev, &data_offset)) {
1366 1356
1367 tv_info = (struct _ATOM_ANALOG_TV_INFO *)(mode_info->atom_context->bios + data_offset); 1357 tv_info = (struct _ATOM_ANALOG_TV_INFO *)
1358 (mode_info->atom_context->bios + data_offset);
1368 1359
1369 switch (tv_info->ucTV_BootUpDefaultStandard) { 1360 switch (tv_info->ucTV_BootUpDefaultStandard) {
1370 case ATOM_TV_NTSC: 1361 case ATOM_TV_NTSC:
1371 tv_std = TV_STD_NTSC; 1362 tv_std = TV_STD_NTSC;
1372 DRM_INFO("Default TV standard: NTSC\n"); 1363 DRM_INFO("Default TV standard: NTSC\n");
1373 break; 1364 break;
1374 case ATOM_TV_NTSCJ: 1365 case ATOM_TV_NTSCJ:
1375 tv_std = TV_STD_NTSC_J; 1366 tv_std = TV_STD_NTSC_J;
1376 DRM_INFO("Default TV standard: NTSC-J\n"); 1367 DRM_INFO("Default TV standard: NTSC-J\n");
1377 break; 1368 break;
1378 case ATOM_TV_PAL: 1369 case ATOM_TV_PAL:
1379 tv_std = TV_STD_PAL; 1370 tv_std = TV_STD_PAL;
1380 DRM_INFO("Default TV standard: PAL\n"); 1371 DRM_INFO("Default TV standard: PAL\n");
1381 break; 1372 break;
1382 case ATOM_TV_PALM: 1373 case ATOM_TV_PALM:
1383 tv_std = TV_STD_PAL_M; 1374 tv_std = TV_STD_PAL_M;
1384 DRM_INFO("Default TV standard: PAL-M\n"); 1375 DRM_INFO("Default TV standard: PAL-M\n");
1385 break; 1376 break;
1386 case ATOM_TV_PALN: 1377 case ATOM_TV_PALN:
1387 tv_std = TV_STD_PAL_N; 1378 tv_std = TV_STD_PAL_N;
1388 DRM_INFO("Default TV standard: PAL-N\n"); 1379 DRM_INFO("Default TV standard: PAL-N\n");
1389 break; 1380 break;
1390 case ATOM_TV_PALCN: 1381 case ATOM_TV_PALCN:
1391 tv_std = TV_STD_PAL_CN; 1382 tv_std = TV_STD_PAL_CN;
1392 DRM_INFO("Default TV standard: PAL-CN\n"); 1383 DRM_INFO("Default TV standard: PAL-CN\n");
1393 break; 1384 break;
1394 case ATOM_TV_PAL60: 1385 case ATOM_TV_PAL60:
1395 tv_std = TV_STD_PAL_60; 1386 tv_std = TV_STD_PAL_60;
1396 DRM_INFO("Default TV standard: PAL-60\n"); 1387 DRM_INFO("Default TV standard: PAL-60\n");
1397 break; 1388 break;
1398 case ATOM_TV_SECAM: 1389 case ATOM_TV_SECAM:
1399 tv_std = TV_STD_SECAM; 1390 tv_std = TV_STD_SECAM;
1400 DRM_INFO("Default TV standard: SECAM\n"); 1391 DRM_INFO("Default TV standard: SECAM\n");
1401 break; 1392 break;
1402 default: 1393 default:
1403 tv_std = TV_STD_NTSC; 1394 tv_std = TV_STD_NTSC;
1404 DRM_INFO("Unknown TV standard; defaulting to NTSC\n"); 1395 DRM_INFO("Unknown TV standard; defaulting to NTSC\n");
1405 break; 1396 break;
1397 }
1406 } 1398 }
1407 return tv_std; 1399 return tv_std;
1408} 1400}
@@ -1420,11 +1412,12 @@ radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder)
1420 uint8_t bg, dac; 1412 uint8_t bg, dac;
1421 struct radeon_encoder_tv_dac *tv_dac = NULL; 1413 struct radeon_encoder_tv_dac *tv_dac = NULL;
1422 1414
1423 atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset); 1415 if (atom_parse_data_header(mode_info->atom_context, index, NULL,
1416 &frev, &crev, &data_offset)) {
1424 1417
1425 dac_info = (struct _COMPASSIONATE_DATA *)(mode_info->atom_context->bios + data_offset); 1418 dac_info = (struct _COMPASSIONATE_DATA *)
1419 (mode_info->atom_context->bios + data_offset);
1426 1420
1427 if (dac_info) {
1428 tv_dac = kzalloc(sizeof(struct radeon_encoder_tv_dac), GFP_KERNEL); 1421 tv_dac = kzalloc(sizeof(struct radeon_encoder_tv_dac), GFP_KERNEL);
1429 1422
1430 if (!tv_dac) 1423 if (!tv_dac)
@@ -1447,6 +1440,30 @@ radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder)
1447 return tv_dac; 1440 return tv_dac;
1448} 1441}
1449 1442
1443static const char *thermal_controller_names[] = {
1444 "NONE",
1445 "LM63",
1446 "ADM1032",
1447 "ADM1030",
1448 "MUA6649",
1449 "LM64",
1450 "F75375",
1451 "ASC7512",
1452};
1453
1454static const char *pp_lib_thermal_controller_names[] = {
1455 "NONE",
1456 "LM63",
1457 "ADM1032",
1458 "ADM1030",
1459 "MUA6649",
1460 "LM64",
1461 "F75375",
1462 "RV6xx",
1463 "RV770",
1464 "ADT7473",
1465};
1466
1450union power_info { 1467union power_info {
1451 struct _ATOM_POWERPLAY_INFO info; 1468 struct _ATOM_POWERPLAY_INFO info;
1452 struct _ATOM_POWERPLAY_INFO_V2 info_2; 1469 struct _ATOM_POWERPLAY_INFO_V2 info_2;
@@ -1466,15 +1483,22 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
1466 struct _ATOM_PPLIB_STATE *power_state; 1483 struct _ATOM_PPLIB_STATE *power_state;
1467 int num_modes = 0, i, j; 1484 int num_modes = 0, i, j;
1468 int state_index = 0, mode_index = 0; 1485 int state_index = 0, mode_index = 0;
1469 1486 struct radeon_i2c_bus_rec i2c_bus;
1470 atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset);
1471
1472 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
1473 1487
1474 rdev->pm.default_power_state = NULL; 1488 rdev->pm.default_power_state = NULL;
1475 1489
1476 if (power_info) { 1490 if (atom_parse_data_header(mode_info->atom_context, index, NULL,
1491 &frev, &crev, &data_offset)) {
1492 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
1477 if (frev < 4) { 1493 if (frev < 4) {
1494 /* add the i2c bus for thermal/fan chip */
1495 if (power_info->info.ucOverdriveThermalController > 0) {
1496 DRM_INFO("Possible %s thermal controller at 0x%02x\n",
1497 thermal_controller_names[power_info->info.ucOverdriveThermalController],
1498 power_info->info.ucOverdriveControllerAddress >> 1);
1499 i2c_bus = radeon_lookup_i2c_gpio(rdev, power_info->info.ucOverdriveI2cLine);
1500 rdev->pm.i2c_bus = radeon_i2c_create(rdev->ddev, &i2c_bus, "Thermal");
1501 }
1478 num_modes = power_info->info.ucNumOfPowerModeEntries; 1502 num_modes = power_info->info.ucNumOfPowerModeEntries;
1479 if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK) 1503 if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK)
1480 num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK; 1504 num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK;
@@ -1684,6 +1708,24 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
1684 } 1708 }
1685 } 1709 }
1686 } else if (frev == 4) { 1710 } else if (frev == 4) {
1711 /* add the i2c bus for thermal/fan chip */
1712 /* no support for internal controller yet */
1713 if (power_info->info_4.sThermalController.ucType > 0) {
1714 if ((power_info->info_4.sThermalController.ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) ||
1715 (power_info->info_4.sThermalController.ucType == ATOM_PP_THERMALCONTROLLER_RV770)) {
1716 DRM_INFO("Internal thermal controller %s fan control\n",
1717 (power_info->info_4.sThermalController.ucFanParameters &
1718 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
1719 } else {
1720 DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
1721 pp_lib_thermal_controller_names[power_info->info_4.sThermalController.ucType],
1722 power_info->info_4.sThermalController.ucI2cAddress >> 1,
1723 (power_info->info_4.sThermalController.ucFanParameters &
1724 ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
1725 i2c_bus = radeon_lookup_i2c_gpio(rdev, power_info->info_4.sThermalController.ucI2cLine);
1726 rdev->pm.i2c_bus = radeon_i2c_create(rdev->ddev, &i2c_bus, "Thermal");
1727 }
1728 }
1687 for (i = 0; i < power_info->info_4.ucNumStates; i++) { 1729 for (i = 0; i < power_info->info_4.ucNumStates; i++) {
1688 mode_index = 0; 1730 mode_index = 0;
1689 power_state = (struct _ATOM_PPLIB_STATE *) 1731 power_state = (struct _ATOM_PPLIB_STATE *)
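
The recurring change throughout radeon_atombios.c above is that `atom_parse_data_header()` is now treated as fallible: every table lookup is wrapped in `if (atom_parse_data_header(...))`, several callers return early when the header is missing, and where the table size is available the loop bound is computed from that size (`num_indices`) instead of a fixed maximum such as `ATOM_MAX_SUPPORTED_DEVICE`. The stand-alone sketch below mirrors only the shape of that guard-then-bound logic; the structs and the parse function here are invented for the example.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct table_header { uint16_t size; };            /* stand-in for a common table header */
struct gpio_entry   { uint8_t id; uint8_t pin; };  /* stand-in for one table entry */

struct gpio_table {
	struct table_header hdr;
	struct gpio_entry   entries[4];
};

/* Pretend parser: returns false when the table is absent from the BIOS image. */
static bool parse_data_header(const struct gpio_table *t, uint16_t *size)
{
	if (!t)
		return false;
	*size = t->hdr.size;
	return true;
}

static int lookup_gpio(const struct gpio_table *t, uint8_t id)
{
	uint16_t size;
	int i, num_indices;

	if (parse_data_header(t, &size)) {           /* guard the whole lookup */
		num_indices = (size - sizeof(struct table_header)) /
			      sizeof(struct gpio_entry);
		for (i = 0; i < num_indices; i++)    /* bound by the real table size */
			if (t->entries[i].id == id)
				return t->entries[i].pin;
	}
	return -1;                                   /* table missing or id not found */
}

int main(void)
{
	struct gpio_table t = {
		.hdr     = { sizeof(struct table_header) + 2 * sizeof(struct gpio_entry) },
		.entries = { { 0x90, 3 }, { 0xa0, 5 } },
	};

	printf("pin=%d\n", lookup_gpio(&t, 0xa0));   /* found via bounded scan */
	printf("pin=%d\n", lookup_gpio(NULL, 0xa0)); /* header parse fails -> -1 */
	return 0;
}
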
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 3f557c4151e0..ed5dfe58f29c 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -7,6 +7,7 @@
7 * ATPX support for both Intel/ATI 7 * ATPX support for both Intel/ATI
8 */ 8 */
9#include <linux/vga_switcheroo.h> 9#include <linux/vga_switcheroo.h>
10#include <linux/slab.h>
10#include <acpi/acpi.h> 11#include <acpi/acpi.h>
11#include <acpi/acpi_bus.h> 12#include <acpi/acpi_bus.h>
12#include <linux/pci.h> 13#include <linux/pci.h>
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 557240460526..8ad71f701316 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -31,6 +31,7 @@
31#include "atom.h" 31#include "atom.h"
32 32
33#include <linux/vga_switcheroo.h> 33#include <linux/vga_switcheroo.h>
34#include <linux/slab.h>
34/* 35/*
35 * BIOS. 36 * BIOS.
36 */ 37 */
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index e9ea38ece375..37db8adb2748 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -531,10 +531,7 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde
531 case CHIP_RS300: 531 case CHIP_RS300:
532 switch (ddc_line) { 532 switch (ddc_line) {
533 case RADEON_GPIO_DVI_DDC: 533 case RADEON_GPIO_DVI_DDC:
534 /* in theory this should be hw capable, 534 i2c.hw_capable = true;
535 * but it doesn't seem to work
536 */
537 i2c.hw_capable = false;
538 break; 535 break;
539 default: 536 default:
540 i2c.hw_capable = false; 537 i2c.hw_capable = false;
@@ -633,6 +630,8 @@ bool radeon_combios_get_clock_info(struct drm_device *dev)
633 p1pll->reference_div = RBIOS16(pll_info + 0x10); 630 p1pll->reference_div = RBIOS16(pll_info + 0x10);
634 p1pll->pll_out_min = RBIOS32(pll_info + 0x12); 631 p1pll->pll_out_min = RBIOS32(pll_info + 0x12);
635 p1pll->pll_out_max = RBIOS32(pll_info + 0x16); 632 p1pll->pll_out_max = RBIOS32(pll_info + 0x16);
633 p1pll->lcd_pll_out_min = p1pll->pll_out_min;
634 p1pll->lcd_pll_out_max = p1pll->pll_out_max;
636 635
637 if (rev > 9) { 636 if (rev > 9) {
638 p1pll->pll_in_min = RBIOS32(pll_info + 0x36); 637 p1pll->pll_in_min = RBIOS32(pll_info + 0x36);
@@ -761,7 +760,9 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
761 dac = RBIOS8(dac_info + 0x3) & 0xf; 760 dac = RBIOS8(dac_info + 0x3) & 0xf;
762 p_dac->ps2_pdac_adj = (bg << 8) | (dac); 761 p_dac->ps2_pdac_adj = (bg << 8) | (dac);
763 } 762 }
764 found = 1; 763 /* if the values are all zeros, use the table */
764 if (p_dac->ps2_pdac_adj)
765 found = 1;
765 } 766 }
766 767
767 if (!found) /* fallback to defaults */ 768 if (!found) /* fallback to defaults */
@@ -896,7 +897,9 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
896 bg = RBIOS8(dac_info + 0x10) & 0xf; 897 bg = RBIOS8(dac_info + 0x10) & 0xf;
897 dac = RBIOS8(dac_info + 0x11) & 0xf; 898 dac = RBIOS8(dac_info + 0x11) & 0xf;
898 tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20); 899 tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
899 found = 1; 900 /* if the values are all zeros, use the table */
901 if (tv_dac->ps2_tvdac_adj)
902 found = 1;
900 } else if (rev > 1) { 903 } else if (rev > 1) {
901 bg = RBIOS8(dac_info + 0xc) & 0xf; 904 bg = RBIOS8(dac_info + 0xc) & 0xf;
902 dac = (RBIOS8(dac_info + 0xc) >> 4) & 0xf; 905 dac = (RBIOS8(dac_info + 0xc) >> 4) & 0xf;
@@ -909,7 +912,9 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
909 bg = RBIOS8(dac_info + 0xe) & 0xf; 912 bg = RBIOS8(dac_info + 0xe) & 0xf;
910 dac = (RBIOS8(dac_info + 0xe) >> 4) & 0xf; 913 dac = (RBIOS8(dac_info + 0xe) >> 4) & 0xf;
911 tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20); 914 tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
912 found = 1; 915 /* if the values are all zeros, use the table */
916 if (tv_dac->ps2_tvdac_adj)
917 found = 1;
913 } 918 }
914 tv_dac->tv_std = radeon_combios_get_tv_info(rdev); 919 tv_dac->tv_std = radeon_combios_get_tv_info(rdev);
915 } 920 }
@@ -926,7 +931,9 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
926 (bg << 16) | (dac << 20); 931 (bg << 16) | (dac << 20);
927 tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj; 932 tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj;
928 tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj; 933 tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj;
929 found = 1; 934 /* if the values are all zeros, use the table */
935 if (tv_dac->ps2_tvdac_adj)
936 found = 1;
930 } else { 937 } else {
931 bg = RBIOS8(dac_info + 0x4) & 0xf; 938 bg = RBIOS8(dac_info + 0x4) & 0xf;
932 dac = RBIOS8(dac_info + 0x5) & 0xf; 939 dac = RBIOS8(dac_info + 0x5) & 0xf;
@@ -934,7 +941,9 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
934 (bg << 16) | (dac << 20); 941 (bg << 16) | (dac << 20);
935 tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj; 942 tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj;
936 tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj; 943 tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj;
937 found = 1; 944 /* if the values are all zeros, use the table */
945 if (tv_dac->ps2_tvdac_adj)
946 found = 1;
938 } 947 }
939 } else { 948 } else {
940 DRM_INFO("No TV DAC info found in BIOS\n"); 949 DRM_INFO("No TV DAC info found in BIOS\n");
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index ee0083f982d8..4559a53d5e57 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -162,12 +162,14 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector,
162{ 162{
163 struct drm_device *dev = connector->dev; 163 struct drm_device *dev = connector->dev;
164 struct drm_connector *conflict; 164 struct drm_connector *conflict;
165 struct radeon_connector *radeon_conflict;
165 int i; 166 int i;
166 167
167 list_for_each_entry(conflict, &dev->mode_config.connector_list, head) { 168 list_for_each_entry(conflict, &dev->mode_config.connector_list, head) {
168 if (conflict == connector) 169 if (conflict == connector)
169 continue; 170 continue;
170 171
172 radeon_conflict = to_radeon_connector(conflict);
171 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { 173 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
172 if (conflict->encoder_ids[i] == 0) 174 if (conflict->encoder_ids[i] == 0)
173 break; 175 break;
@@ -177,6 +179,9 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector,
177 if (conflict->status != connector_status_connected) 179 if (conflict->status != connector_status_connected)
178 continue; 180 continue;
179 181
182 if (radeon_conflict->use_digital)
183 continue;
184
180 if (priority == true) { 185 if (priority == true) {
181 DRM_INFO("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict)); 186 DRM_INFO("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict));
182 DRM_INFO("in favor of %s\n", drm_get_connector_name(connector)); 187 DRM_INFO("in favor of %s\n", drm_get_connector_name(connector));
@@ -287,6 +292,7 @@ int radeon_connector_set_property(struct drm_connector *connector, struct drm_pr
287 292
288 if (property == rdev->mode_info.coherent_mode_property) { 293 if (property == rdev->mode_info.coherent_mode_property) {
289 struct radeon_encoder_atom_dig *dig; 294 struct radeon_encoder_atom_dig *dig;
295 bool new_coherent_mode;
290 296
291 /* need to find digital encoder on connector */ 297 /* need to find digital encoder on connector */
292 encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS); 298 encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
@@ -299,8 +305,11 @@ int radeon_connector_set_property(struct drm_connector *connector, struct drm_pr
299 return 0; 305 return 0;
300 306
301 dig = radeon_encoder->enc_priv; 307 dig = radeon_encoder->enc_priv;
302 dig->coherent_mode = val ? true : false; 308 new_coherent_mode = val ? true : false;
303 radeon_property_change_mode(&radeon_encoder->base); 309 if (dig->coherent_mode != new_coherent_mode) {
310 dig->coherent_mode = new_coherent_mode;
311 radeon_property_change_mode(&radeon_encoder->base);
312 }
304 } 313 }
305 314
306 if (property == rdev->mode_info.tv_std_property) { 315 if (property == rdev->mode_info.tv_std_property) {
@@ -315,7 +324,7 @@ int radeon_connector_set_property(struct drm_connector *connector, struct drm_pr
315 radeon_encoder = to_radeon_encoder(encoder); 324 radeon_encoder = to_radeon_encoder(encoder);
316 if (!radeon_encoder->enc_priv) 325 if (!radeon_encoder->enc_priv)
317 return 0; 326 return 0;
318 if (rdev->is_atom_bios) { 327 if (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom) {
319 struct radeon_encoder_atom_dac *dac_int; 328 struct radeon_encoder_atom_dac *dac_int;
320 dac_int = radeon_encoder->enc_priv; 329 dac_int = radeon_encoder->enc_priv;
321 dac_int->tv_std = val; 330 dac_int->tv_std = val;
@@ -940,7 +949,7 @@ static void radeon_dp_connector_destroy(struct drm_connector *connector)
940 if (radeon_connector->edid) 949 if (radeon_connector->edid)
941 kfree(radeon_connector->edid); 950 kfree(radeon_connector->edid);
942 if (radeon_dig_connector->dp_i2c_bus) 951 if (radeon_dig_connector->dp_i2c_bus)
943 radeon_i2c_destroy_dp(radeon_dig_connector->dp_i2c_bus); 952 radeon_i2c_destroy(radeon_dig_connector->dp_i2c_bus);
944 kfree(radeon_connector->con_priv); 953 kfree(radeon_connector->con_priv);
945 drm_sysfs_connector_remove(connector); 954 drm_sysfs_connector_remove(connector);
946 drm_connector_cleanup(connector); 955 drm_connector_cleanup(connector);
@@ -1307,6 +1316,8 @@ radeon_add_legacy_connector(struct drm_device *dev,
1307 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI"); 1316 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
1308 if (!radeon_connector->ddc_bus) 1317 if (!radeon_connector->ddc_bus)
1309 goto failed; 1318 goto failed;
1319 }
1320 if (connector_type == DRM_MODE_CONNECTOR_DVII) {
1310 radeon_connector->dac_load_detect = true; 1321 radeon_connector->dac_load_detect = true;
1311 drm_connector_attach_property(&radeon_connector->base, 1322 drm_connector_attach_property(&radeon_connector->base,
1312 rdev->mode_info.load_detect_property, 1323 rdev->mode_info.load_detect_property,
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index dc6eba6b96dd..2f042a3c0e62 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -417,8 +417,9 @@ static int radeon_do_wait_for_idle(drm_radeon_private_t * dev_priv)
417 return -EBUSY; 417 return -EBUSY;
418} 418}
419 419
420static void radeon_init_pipes(drm_radeon_private_t *dev_priv) 420static void radeon_init_pipes(struct drm_device *dev)
421{ 421{
422 drm_radeon_private_t *dev_priv = dev->dev_private;
422 uint32_t gb_tile_config, gb_pipe_sel = 0; 423 uint32_t gb_tile_config, gb_pipe_sel = 0;
423 424
424 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV530) { 425 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV530) {
@@ -434,13 +435,19 @@ static void radeon_init_pipes(drm_radeon_private_t *dev_priv)
434 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R420) { 435 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R420) {
435 gb_pipe_sel = RADEON_READ(R400_GB_PIPE_SELECT); 436 gb_pipe_sel = RADEON_READ(R400_GB_PIPE_SELECT);
436 dev_priv->num_gb_pipes = ((gb_pipe_sel >> 12) & 0x3) + 1; 437 dev_priv->num_gb_pipes = ((gb_pipe_sel >> 12) & 0x3) + 1;
438 /* SE cards have 1 pipe */
439 if ((dev->pdev->device == 0x5e4c) ||
440 (dev->pdev->device == 0x5e4f))
441 dev_priv->num_gb_pipes = 1;
437 } else { 442 } else {
438 /* R3xx */ 443 /* R3xx */
439 if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300) || 444 if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300 &&
440 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350)) { 445 dev->pdev->device != 0x4144) ||
446 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350 &&
447 dev->pdev->device != 0x4148)) {
441 dev_priv->num_gb_pipes = 2; 448 dev_priv->num_gb_pipes = 2;
442 } else { 449 } else {
443 /* R3Vxx */ 450 /* RV3xx/R300 AD/R350 AH */
444 dev_priv->num_gb_pipes = 1; 451 dev_priv->num_gb_pipes = 1;
445 } 452 }
446 } 453 }
@@ -736,7 +743,7 @@ static int radeon_do_engine_reset(struct drm_device * dev)
736 743
737 /* setup the raster pipes */ 744 /* setup the raster pipes */
738 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R300) 745 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R300)
739 radeon_init_pipes(dev_priv); 746 radeon_init_pipes(dev);
740 747
741 /* Reset the CP ring */ 748 /* Reset the CP ring */
742 radeon_do_cp_reset(dev_priv); 749 radeon_do_cp_reset(dev_priv);
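
The radeon_cp.c change passes the full `struct drm_device` into `radeon_init_pipes()` instead of only the private struct, so the function can consult `dev->pdev->device` and apply per-PCI-ID quirks to the pipe count (the single-pipe "SE" variants, and the R300 AD / R350 AH parts that only have one pipe). A compact sketch of that ID-based quirking follows; the shift and the two SE IDs are taken from the hunk above, everything else is illustrative.

#include <stdint.h>
#include <stdio.h>

/* Derive a default pipe count from the register read, then override it
 * for PCI device IDs known to have a single pipe. */
static int num_gb_pipes(uint32_t gb_pipe_sel, uint16_t pci_device)
{
	int pipes = ((gb_pipe_sel >> 12) & 0x3) + 1;

	switch (pci_device) {
	case 0x5e4c:   /* SE variants: one pipe regardless of the register */
	case 0x5e4f:
		pipes = 1;
		break;
	}
	return pipes;
}

int main(void)
{
	printf("%d\n", num_gb_pipes(0x3000, 0x5e4b));  /* uses the register value */
	printf("%d\n", num_gb_pipes(0x3000, 0x5e4c));  /* quirked down to 1 */
	return 0;
}
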
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 70ba02ed7723..f9b0fe002c0a 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -193,9 +193,11 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
193 radeon_bo_list_fence(&parser->validated, parser->ib->fence); 193 radeon_bo_list_fence(&parser->validated, parser->ib->fence);
194 } 194 }
195 radeon_bo_list_unreserve(&parser->validated); 195 radeon_bo_list_unreserve(&parser->validated);
196 for (i = 0; i < parser->nrelocs; i++) { 196 if (parser->relocs != NULL) {
197 if (parser->relocs[i].gobj) 197 for (i = 0; i < parser->nrelocs; i++) {
198 drm_gem_object_unreference_unlocked(parser->relocs[i].gobj); 198 if (parser->relocs[i].gobj)
199 drm_gem_object_unreference_unlocked(parser->relocs[i].gobj);
200 }
199 } 201 }
200 kfree(parser->track); 202 kfree(parser->track);
201 kfree(parser->relocs); 203 kfree(parser->relocs);
@@ -243,7 +245,8 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
243 } 245 }
244 r = radeon_cs_parser_relocs(&parser); 246 r = radeon_cs_parser_relocs(&parser);
245 if (r) { 247 if (r) {
246 DRM_ERROR("Failed to parse relocation !\n"); 248 if (r != -ERESTARTSYS)
249 DRM_ERROR("Failed to parse relocation %d!\n", r);
247 radeon_cs_parser_fini(&parser, r); 250 radeon_cs_parser_fini(&parser, r);
248 mutex_unlock(&rdev->cs_mutex); 251 mutex_unlock(&rdev->cs_mutex);
249 return r; 252 return r;
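
The radeon_cs.c hunks harden the error path: the relocation array is only walked when it was actually allocated, and the "failed to parse relocation" message is suppressed when the return code is -ERESTARTSYS, since the ioctl is merely being restarted after a signal and that is not worth logging. A minimal user-space sketch of both guards, with invented names (ERESTARTSYS is kernel-internal, so it is defined locally here):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define ERESTARTSYS 512   /* kernel-internal errno value, defined here for the sketch */

struct reloc { void *gobj; };

struct parser {
	struct reloc *relocs;
	int nrelocs;
};

static void parser_fini(struct parser *p)
{
	int i;

	if (p->relocs != NULL) {           /* array may never have been allocated */
		for (i = 0; i < p->nrelocs; i++)
			free(p->relocs[i].gobj);
	}
	free(p->relocs);
}

static void report_parse_error(int r)
{
	if (r != -ERESTARTSYS)             /* restart-after-signal is not an error */
		fprintf(stderr, "Failed to parse relocation %d!\n", r);
}

int main(void)
{
	struct parser p = { NULL, 0 };

	parser_fini(&p);                   /* safe even when relocs was never set up */
	report_parse_error(-ERESTARTSYS);  /* silent */
	report_parse_error(-EINVAL);       /* logged */
	return 0;
}
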
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index e28e4ed5f720..7b629e305560 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -26,6 +26,7 @@
26 * Jerome Glisse 26 * Jerome Glisse
27 */ 27 */
28#include <linux/console.h> 28#include <linux/console.h>
29#include <linux/slab.h>
29#include <drm/drmP.h> 30#include <drm/drmP.h>
30#include <drm/drm_crtc_helper.h> 31#include <drm/drm_crtc_helper.h>
31#include <drm/radeon_drm.h> 32#include <drm/radeon_drm.h>
@@ -33,9 +34,56 @@
33#include <linux/vga_switcheroo.h> 34#include <linux/vga_switcheroo.h>
34#include "radeon_reg.h" 35#include "radeon_reg.h"
35#include "radeon.h" 36#include "radeon.h"
36#include "radeon_asic.h"
37#include "atom.h" 37#include "atom.h"
38 38
39static const char radeon_family_name[][16] = {
40 "R100",
41 "RV100",
42 "RS100",
43 "RV200",
44 "RS200",
45 "R200",
46 "RV250",
47 "RS300",
48 "RV280",
49 "R300",
50 "R350",
51 "RV350",
52 "RV380",
53 "R420",
54 "R423",
55 "RV410",
56 "RS400",
57 "RS480",
58 "RS600",
59 "RS690",
60 "RS740",
61 "RV515",
62 "R520",
63 "RV530",
64 "RV560",
65 "RV570",
66 "R580",
67 "R600",
68 "RV610",
69 "RV630",
70 "RV670",
71 "RV620",
72 "RV635",
73 "RS780",
74 "RS880",
75 "RV770",
76 "RV730",
77 "RV710",
78 "RV740",
79 "CEDAR",
80 "REDWOOD",
81 "JUNIPER",
82 "CYPRESS",
83 "HEMLOCK",
84 "LAST",
85};
86
39/* 87/*
40 * Clear GPU surface registers. 88 * Clear GPU surface registers.
41 */ 89 */
@@ -242,6 +290,36 @@ bool radeon_card_posted(struct radeon_device *rdev)
242 290
243} 291}
244 292
293void radeon_update_bandwidth_info(struct radeon_device *rdev)
294{
295 fixed20_12 a;
296 u32 sclk, mclk;
297
298 if (rdev->flags & RADEON_IS_IGP) {
299 sclk = radeon_get_engine_clock(rdev);
300 mclk = rdev->clock.default_mclk;
301
302 a.full = rfixed_const(100);
303 rdev->pm.sclk.full = rfixed_const(sclk);
304 rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
305 rdev->pm.mclk.full = rfixed_const(mclk);
306 rdev->pm.mclk.full = rfixed_div(rdev->pm.mclk, a);
307
308 a.full = rfixed_const(16);
309 /* core_bandwidth = sclk(Mhz) * 16 */
310 rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a);
311 } else {
312 sclk = radeon_get_engine_clock(rdev);
313 mclk = radeon_get_memory_clock(rdev);
314
315 a.full = rfixed_const(100);
316 rdev->pm.sclk.full = rfixed_const(sclk);
317 rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
318 rdev->pm.mclk.full = rfixed_const(mclk);
319 rdev->pm.mclk.full = rfixed_div(rdev->pm.mclk, a);
320 }
321}
322
245bool radeon_boot_test_post_card(struct radeon_device *rdev) 323bool radeon_boot_test_post_card(struct radeon_device *rdev)
246{ 324{
247 if (radeon_card_posted(rdev)) 325 if (radeon_card_posted(rdev))
@@ -288,181 +366,6 @@ void radeon_dummy_page_fini(struct radeon_device *rdev)
288} 366}
289 367
290 368
291/*
292 * Registers accessors functions.
293 */
294uint32_t radeon_invalid_rreg(struct radeon_device *rdev, uint32_t reg)
295{
296 DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
297 BUG_ON(1);
298 return 0;
299}
300
301void radeon_invalid_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
302{
303 DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
304 reg, v);
305 BUG_ON(1);
306}
307
308void radeon_register_accessor_init(struct radeon_device *rdev)
309{
310 rdev->mc_rreg = &radeon_invalid_rreg;
311 rdev->mc_wreg = &radeon_invalid_wreg;
312 rdev->pll_rreg = &radeon_invalid_rreg;
313 rdev->pll_wreg = &radeon_invalid_wreg;
314 rdev->pciep_rreg = &radeon_invalid_rreg;
315 rdev->pciep_wreg = &radeon_invalid_wreg;
316
317 /* Don't change order as we are overridding accessor. */
318 if (rdev->family < CHIP_RV515) {
319 rdev->pcie_reg_mask = 0xff;
320 } else {
321 rdev->pcie_reg_mask = 0x7ff;
322 }
323 /* FIXME: not sure here */
324 if (rdev->family <= CHIP_R580) {
325 rdev->pll_rreg = &r100_pll_rreg;
326 rdev->pll_wreg = &r100_pll_wreg;
327 }
328 if (rdev->family >= CHIP_R420) {
329 rdev->mc_rreg = &r420_mc_rreg;
330 rdev->mc_wreg = &r420_mc_wreg;
331 }
332 if (rdev->family >= CHIP_RV515) {
333 rdev->mc_rreg = &rv515_mc_rreg;
334 rdev->mc_wreg = &rv515_mc_wreg;
335 }
336 if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) {
337 rdev->mc_rreg = &rs400_mc_rreg;
338 rdev->mc_wreg = &rs400_mc_wreg;
339 }
340 if (rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
341 rdev->mc_rreg = &rs690_mc_rreg;
342 rdev->mc_wreg = &rs690_mc_wreg;
343 }
344 if (rdev->family == CHIP_RS600) {
345 rdev->mc_rreg = &rs600_mc_rreg;
346 rdev->mc_wreg = &rs600_mc_wreg;
347 }
348 if ((rdev->family >= CHIP_R600) && (rdev->family <= CHIP_RV740)) {
349 rdev->pciep_rreg = &r600_pciep_rreg;
350 rdev->pciep_wreg = &r600_pciep_wreg;
351 }
352}
353
354
355/*
356 * ASIC
357 */
358int radeon_asic_init(struct radeon_device *rdev)
359{
360 radeon_register_accessor_init(rdev);
361 switch (rdev->family) {
362 case CHIP_R100:
363 case CHIP_RV100:
364 case CHIP_RS100:
365 case CHIP_RV200:
366 case CHIP_RS200:
367 rdev->asic = &r100_asic;
368 break;
369 case CHIP_R200:
370 case CHIP_RV250:
371 case CHIP_RS300:
372 case CHIP_RV280:
373 rdev->asic = &r200_asic;
374 break;
375 case CHIP_R300:
376 case CHIP_R350:
377 case CHIP_RV350:
378 case CHIP_RV380:
379 if (rdev->flags & RADEON_IS_PCIE)
380 rdev->asic = &r300_asic_pcie;
381 else
382 rdev->asic = &r300_asic;
383 break;
384 case CHIP_R420:
385 case CHIP_R423:
386 case CHIP_RV410:
387 rdev->asic = &r420_asic;
388 break;
389 case CHIP_RS400:
390 case CHIP_RS480:
391 rdev->asic = &rs400_asic;
392 break;
393 case CHIP_RS600:
394 rdev->asic = &rs600_asic;
395 break;
396 case CHIP_RS690:
397 case CHIP_RS740:
398 rdev->asic = &rs690_asic;
399 break;
400 case CHIP_RV515:
401 rdev->asic = &rv515_asic;
402 break;
403 case CHIP_R520:
404 case CHIP_RV530:
405 case CHIP_RV560:
406 case CHIP_RV570:
407 case CHIP_R580:
408 rdev->asic = &r520_asic;
409 break;
410 case CHIP_R600:
411 case CHIP_RV610:
412 case CHIP_RV630:
413 case CHIP_RV620:
414 case CHIP_RV635:
415 case CHIP_RV670:
416 case CHIP_RS780:
417 case CHIP_RS880:
418 rdev->asic = &r600_asic;
419 break;
420 case CHIP_RV770:
421 case CHIP_RV730:
422 case CHIP_RV710:
423 case CHIP_RV740:
424 rdev->asic = &rv770_asic;
425 break;
426 case CHIP_CEDAR:
427 case CHIP_REDWOOD:
428 case CHIP_JUNIPER:
429 case CHIP_CYPRESS:
430 case CHIP_HEMLOCK:
431 rdev->asic = &evergreen_asic;
432 break;
433 default:
434 /* FIXME: not supported yet */
435 return -EINVAL;
436 }
437
438 if (rdev->flags & RADEON_IS_IGP) {
439 rdev->asic->get_memory_clock = NULL;
440 rdev->asic->set_memory_clock = NULL;
441 }
442
443 return 0;
444}
445
446
447/*
448 * Wrapper around modesetting bits.
449 */
450int radeon_clocks_init(struct radeon_device *rdev)
451{
452 int r;
453
454 r = radeon_static_clocks_init(rdev->ddev);
455 if (r) {
456 return r;
457 }
458 DRM_INFO("Clocks initialized !\n");
459 return 0;
460}
461
462void radeon_clocks_fini(struct radeon_device *rdev)
463{
464}
465
466/* ATOM accessor methods */ 369/* ATOM accessor methods */
467static uint32_t cail_pll_read(struct card_info *info, uint32_t reg) 370static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
468{ 371{
@@ -567,29 +470,6 @@ static unsigned int radeon_vga_set_decode(void *cookie, bool state)
567 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; 470 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
568} 471}
569 472
570void radeon_agp_disable(struct radeon_device *rdev)
571{
572 rdev->flags &= ~RADEON_IS_AGP;
573 if (rdev->family >= CHIP_R600) {
574 DRM_INFO("Forcing AGP to PCIE mode\n");
575 rdev->flags |= RADEON_IS_PCIE;
576 } else if (rdev->family >= CHIP_RV515 ||
577 rdev->family == CHIP_RV380 ||
578 rdev->family == CHIP_RV410 ||
579 rdev->family == CHIP_R423) {
580 DRM_INFO("Forcing AGP to PCIE mode\n");
581 rdev->flags |= RADEON_IS_PCIE;
582 rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
583 rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
584 } else {
585 DRM_INFO("Forcing AGP to PCI mode\n");
586 rdev->flags |= RADEON_IS_PCI;
587 rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
588 rdev->asic->gart_set_page = &r100_pci_gart_set_page;
589 }
590 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
591}
592
593void radeon_check_arguments(struct radeon_device *rdev) 473void radeon_check_arguments(struct radeon_device *rdev)
594{ 474{
595 /* vramlimit must be a power of two */ 475 /* vramlimit must be a power of two */
@@ -694,7 +574,6 @@ int radeon_device_init(struct radeon_device *rdev,
694 int r; 574 int r;
695 int dma_bits; 575 int dma_bits;
696 576
697 DRM_INFO("radeon: Initializing kernel modesetting.\n");
698 rdev->shutdown = false; 577 rdev->shutdown = false;
699 rdev->dev = &pdev->dev; 578 rdev->dev = &pdev->dev;
700 rdev->ddev = ddev; 579 rdev->ddev = ddev;
@@ -706,6 +585,10 @@ int radeon_device_init(struct radeon_device *rdev,
706 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; 585 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
707 rdev->gpu_lockup = false; 586 rdev->gpu_lockup = false;
708 rdev->accel_working = false; 587 rdev->accel_working = false;
588
589 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X).\n",
590 radeon_family_name[rdev->family], pdev->vendor, pdev->device);
591
709 /* mutex initialization are all done here so we 592 /* mutex initialization are all done here so we
710 * can recall function without having locking issues */ 593 * can recall function without having locking issues */
711 mutex_init(&rdev->cs_mutex); 594 mutex_init(&rdev->cs_mutex);
@@ -731,6 +614,14 @@ int radeon_device_init(struct radeon_device *rdev,
731 return r; 614 return r;
732 radeon_check_arguments(rdev); 615 radeon_check_arguments(rdev);
733 616
617 /* all of the newer IGP chips have an internal gart
618 * However some rs4xx report as AGP, so remove that here.
619 */
620 if ((rdev->family >= CHIP_RS400) &&
621 (rdev->flags & RADEON_IS_IGP)) {
622 rdev->flags &= ~RADEON_IS_AGP;
623 }
624
734 if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) { 625 if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
735 radeon_agp_disable(rdev); 626 radeon_agp_disable(rdev);
736 } 627 }
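
The comment in the last hunk gives the rationale: RS400-and-newer IGPs always use their internal GART, yet some rs4xx boards report an AGP capability, so the flag is cleared before the AGP-vs-PCIE handling that follows. A tiny standalone sketch of that sanitisation (the flag bits and family values are placeholders, not the real radeon_family.h definitions):

#include <stdint.h>
#include <stdio.h>

/* Standalone sketch, not part of the patch: the flag bits and family values
 * are placeholders; only the comparison mirrors the hunk above. */
#define RADEON_IS_AGP	0x00000008UL
#define RADEON_IS_IGP	0x00000002UL
enum family { CHIP_RS400 = 16, CHIP_RS480 = 17 };

static uint32_t radeon_sanitize_flags(enum family fam, uint32_t flags)
{
	/* RS400+ IGPs always use the internal GART, so drop a stray AGP bit
	 * reported by some rs4xx boards before the AGP handling runs */
	if (fam >= CHIP_RS400 && (flags & RADEON_IS_IGP))
		flags &= ~RADEON_IS_AGP;
	return flags;
}

int main(void)
{
	uint32_t flags = RADEON_IS_AGP | RADEON_IS_IGP;

	printf("rs480 flags: 0x%08x\n",
	       (unsigned)radeon_sanitize_flags(CHIP_RS480, flags));
	return 0;
}
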
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index ba8d806dcf39..bb1c122cad21 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -86,12 +86,12 @@ static void evergreen_crtc_load_lut(struct drm_crtc *crtc)
86 WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff); 86 WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff);
87 WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff); 87 WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff);
88 88
89 WREG32(EVERGREEN_DC_LUT_RW_MODE, radeon_crtc->crtc_id); 89 WREG32(EVERGREEN_DC_LUT_RW_MODE + radeon_crtc->crtc_offset, 0);
90 WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK, 0x00000007); 90 WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + radeon_crtc->crtc_offset, 0x00000007);
91 91
92 WREG32(EVERGREEN_DC_LUT_RW_INDEX, 0); 92 WREG32(EVERGREEN_DC_LUT_RW_INDEX + radeon_crtc->crtc_offset, 0);
93 for (i = 0; i < 256; i++) { 93 for (i = 0; i < 256; i++) {
94 WREG32(EVERGREEN_DC_LUT_30_COLOR, 94 WREG32(EVERGREEN_DC_LUT_30_COLOR + radeon_crtc->crtc_offset,
95 (radeon_crtc->lut_r[i] << 20) | 95 (radeon_crtc->lut_r[i] << 20) |
96 (radeon_crtc->lut_g[i] << 10) | 96 (radeon_crtc->lut_g[i] << 10) |
97 (radeon_crtc->lut_b[i] << 0)); 97 (radeon_crtc->lut_b[i] << 0));
@@ -368,10 +368,9 @@ static bool radeon_setup_enc_conn(struct drm_device *dev)
368 368
369 if (rdev->bios) { 369 if (rdev->bios) {
370 if (rdev->is_atom_bios) { 370 if (rdev->is_atom_bios) {
371 if (rdev->family >= CHIP_R600) 371 ret = radeon_get_atom_connector_info_from_supported_devices_table(dev);
372 if (ret == false)
372 ret = radeon_get_atom_connector_info_from_object_table(dev); 373 ret = radeon_get_atom_connector_info_from_object_table(dev);
373 else
374 ret = radeon_get_atom_connector_info_from_supported_devices_table(dev);
375 } else { 374 } else {
376 ret = radeon_get_legacy_connector_info_from_bios(dev); 375 ret = radeon_get_legacy_connector_info_from_bios(dev);
377 if (ret == false) 376 if (ret == false)
@@ -469,10 +468,19 @@ static void radeon_compute_pll_legacy(struct radeon_pll *pll,
469 uint32_t best_error = 0xffffffff; 468 uint32_t best_error = 0xffffffff;
470 uint32_t best_vco_diff = 1; 469 uint32_t best_vco_diff = 1;
471 uint32_t post_div; 470 uint32_t post_div;
471 u32 pll_out_min, pll_out_max;
472 472
473 DRM_DEBUG("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div); 473 DRM_DEBUG("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
474 freq = freq * 1000; 474 freq = freq * 1000;
475 475
476 if (pll->flags & RADEON_PLL_IS_LCD) {
477 pll_out_min = pll->lcd_pll_out_min;
478 pll_out_max = pll->lcd_pll_out_max;
479 } else {
480 pll_out_min = pll->pll_out_min;
481 pll_out_max = pll->pll_out_max;
482 }
483
476 if (pll->flags & RADEON_PLL_USE_REF_DIV) 484 if (pll->flags & RADEON_PLL_USE_REF_DIV)
477 min_ref_div = max_ref_div = pll->reference_div; 485 min_ref_div = max_ref_div = pll->reference_div;
478 else { 486 else {
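
The LCD-aware VCO limit selection added here is repeated verbatim in the calc_fb_ref_div() and radeon_compute_pll_new() hunks further down. The hypothetical helper below is just a sketch of that shared pattern; the flag value, reduced struct layout and example numbers are illustrative stand-ins for the real radeon_pll:

#include <stdint.h>
#include <stdio.h>

/* Standalone sketch, not part of the patch: flag value, struct layout and
 * example numbers are illustrative stand-ins for the real radeon_pll. */
#define RADEON_PLL_IS_LCD (1 << 13)

struct pll {
	uint32_t flags;
	uint32_t pll_out_min, pll_out_max;
	uint32_t lcd_pll_out_min, lcd_pll_out_max;
};

/* Hypothetical helper capturing the repeated selection: honour the dedicated
 * LCD output range when RADEON_PLL_IS_LCD is set, else the generic one. */
static void pll_out_limits(const struct pll *p, uint32_t *min, uint32_t *max)
{
	if (p->flags & RADEON_PLL_IS_LCD) {
		*min = p->lcd_pll_out_min;
		*max = p->lcd_pll_out_max;
	} else {
		*min = p->pll_out_min;
		*max = p->pll_out_max;
	}
}

int main(void)
{
	struct pll p = { RADEON_PLL_IS_LCD, 64800, 100000, 32000, 50000 };
	uint32_t lo, hi;

	pll_out_limits(&p, &lo, &hi);
	printf("VCO range: %u - %u\n", lo, hi);
	return 0;
}
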
@@ -536,10 +544,10 @@ static void radeon_compute_pll_legacy(struct radeon_pll *pll,
536 tmp = (uint64_t)pll->reference_freq * feedback_div; 544 tmp = (uint64_t)pll->reference_freq * feedback_div;
537 vco = radeon_div(tmp, ref_div); 545 vco = radeon_div(tmp, ref_div);
538 546
539 if (vco < pll->pll_out_min) { 547 if (vco < pll_out_min) {
540 min_feed_div = feedback_div + 1; 548 min_feed_div = feedback_div + 1;
541 continue; 549 continue;
542 } else if (vco > pll->pll_out_max) { 550 } else if (vco > pll_out_max) {
543 max_feed_div = feedback_div; 551 max_feed_div = feedback_div;
544 continue; 552 continue;
545 } 553 }
@@ -675,6 +683,15 @@ calc_fb_ref_div(struct radeon_pll *pll,
675{ 683{
676 fixed20_12 ffreq, max_error, error, pll_out, a; 684 fixed20_12 ffreq, max_error, error, pll_out, a;
677 u32 vco; 685 u32 vco;
686 u32 pll_out_min, pll_out_max;
687
688 if (pll->flags & RADEON_PLL_IS_LCD) {
689 pll_out_min = pll->lcd_pll_out_min;
690 pll_out_max = pll->lcd_pll_out_max;
691 } else {
692 pll_out_min = pll->pll_out_min;
693 pll_out_max = pll->pll_out_max;
694 }
678 695
679 ffreq.full = rfixed_const(freq); 696 ffreq.full = rfixed_const(freq);
680 /* max_error = ffreq * 0.0025; */ 697 /* max_error = ffreq * 0.0025; */
@@ -686,7 +703,7 @@ calc_fb_ref_div(struct radeon_pll *pll,
686 vco = pll->reference_freq * (((*fb_div) * 10) + (*fb_div_frac)); 703 vco = pll->reference_freq * (((*fb_div) * 10) + (*fb_div_frac));
687 vco = vco / ((*ref_div) * 10); 704 vco = vco / ((*ref_div) * 10);
688 705
689 if ((vco < pll->pll_out_min) || (vco > pll->pll_out_max)) 706 if ((vco < pll_out_min) || (vco > pll_out_max))
690 continue; 707 continue;
691 708
692 /* pll_out = vco / post_div; */ 709 /* pll_out = vco / post_div; */
@@ -714,6 +731,15 @@ static void radeon_compute_pll_new(struct radeon_pll *pll,
714{ 731{
715 u32 fb_div = 0, fb_div_frac = 0, post_div = 0, ref_div = 0; 732 u32 fb_div = 0, fb_div_frac = 0, post_div = 0, ref_div = 0;
716 u32 best_freq = 0, vco_frequency; 733 u32 best_freq = 0, vco_frequency;
734 u32 pll_out_min, pll_out_max;
735
736 if (pll->flags & RADEON_PLL_IS_LCD) {
737 pll_out_min = pll->lcd_pll_out_min;
738 pll_out_max = pll->lcd_pll_out_max;
739 } else {
740 pll_out_min = pll->pll_out_min;
741 pll_out_max = pll->pll_out_max;
742 }
717 743
718 /* freq = freq / 10; */ 744 /* freq = freq / 10; */
719 do_div(freq, 10); 745 do_div(freq, 10);
@@ -724,7 +750,7 @@ static void radeon_compute_pll_new(struct radeon_pll *pll,
724 goto done; 750 goto done;
725 751
726 vco_frequency = freq * post_div; 752 vco_frequency = freq * post_div;
727 if ((vco_frequency < pll->pll_out_min) || (vco_frequency > pll->pll_out_max)) 753 if ((vco_frequency < pll_out_min) || (vco_frequency > pll_out_max))
728 goto done; 754 goto done;
729 755
730 if (pll->flags & RADEON_PLL_USE_REF_DIV) { 756 if (pll->flags & RADEON_PLL_USE_REF_DIV) {
@@ -749,7 +775,7 @@ static void radeon_compute_pll_new(struct radeon_pll *pll,
749 continue; 775 continue;
750 776
751 vco_frequency = freq * post_div; 777 vco_frequency = freq * post_div;
752 if ((vco_frequency < pll->pll_out_min) || (vco_frequency > pll->pll_out_max)) 778 if ((vco_frequency < pll_out_min) || (vco_frequency > pll_out_max))
753 continue; 779 continue;
754 if (pll->flags & RADEON_PLL_USE_REF_DIV) { 780 if (pll->flags & RADEON_PLL_USE_REF_DIV) {
755 ref_div = pll->reference_div; 781 ref_div = pll->reference_div;
@@ -945,6 +971,23 @@ static int radeon_modeset_create_props(struct radeon_device *rdev)
945 return 0; 971 return 0;
946} 972}
947 973
974void radeon_update_display_priority(struct radeon_device *rdev)
975{
976 /* adjustment options for the display watermarks */
977 if ((radeon_disp_priority == 0) || (radeon_disp_priority > 2)) {
978 /* set display priority to high for r3xx, rv515 chips
979 * this avoids flickering due to underflow to the
980 * display controllers during heavy acceleration.
981 */
982 if (ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV515))
983 rdev->disp_priority = 2;
984 else
985 rdev->disp_priority = 0;
986 } else
987 rdev->disp_priority = radeon_disp_priority;
988
989}
990
948int radeon_modeset_init(struct radeon_device *rdev) 991int radeon_modeset_init(struct radeon_device *rdev)
949{ 992{
950 int i; 993 int i;
@@ -976,15 +1019,6 @@ int radeon_modeset_init(struct radeon_device *rdev)
976 radeon_combios_check_hardcoded_edid(rdev); 1019 radeon_combios_check_hardcoded_edid(rdev);
977 } 1020 }
978 1021
979 if (rdev->flags & RADEON_SINGLE_CRTC)
980 rdev->num_crtc = 1;
981 else {
982 if (ASIC_IS_DCE4(rdev))
983 rdev->num_crtc = 6;
984 else
985 rdev->num_crtc = 2;
986 }
987
988 /* allocate crtcs */ 1022 /* allocate crtcs */
989 for (i = 0; i < rdev->num_crtc; i++) { 1023 for (i = 0; i < rdev->num_crtc; i++) {
990 radeon_crtc_init(rdev->ddev, i); 1024 radeon_crtc_init(rdev->ddev, i);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 6eec0ece6a6c..4b05563d99e1 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -42,9 +42,11 @@
42 * KMS wrapper. 42 * KMS wrapper.
43 * - 2.0.0 - initial interface 43 * - 2.0.0 - initial interface
44 * - 2.1.0 - add square tiling interface 44 * - 2.1.0 - add square tiling interface
45 * - 2.2.0 - add r6xx/r7xx const buffer support
46 * - 2.3.0 - add MSPOS + 3D texture + r500 VAP regs
45 */ 47 */
46#define KMS_DRIVER_MAJOR 2 48#define KMS_DRIVER_MAJOR 2
47#define KMS_DRIVER_MINOR 1 49#define KMS_DRIVER_MINOR 3
48#define KMS_DRIVER_PATCHLEVEL 0 50#define KMS_DRIVER_PATCHLEVEL 0
49int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); 51int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
50int radeon_driver_unload_kms(struct drm_device *dev); 52int radeon_driver_unload_kms(struct drm_device *dev);
@@ -91,6 +93,8 @@ int radeon_tv = 1;
91int radeon_new_pll = -1; 93int radeon_new_pll = -1;
92int radeon_dynpm = -1; 94int radeon_dynpm = -1;
93int radeon_audio = 1; 95int radeon_audio = 1;
96int radeon_disp_priority = 0;
97int radeon_hw_i2c = 0;
94 98
95MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); 99MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
96module_param_named(no_wb, radeon_no_wb, int, 0444); 100module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -134,6 +138,12 @@ module_param_named(dynpm, radeon_dynpm, int, 0444);
134MODULE_PARM_DESC(audio, "Audio enable (0 = disable)"); 138MODULE_PARM_DESC(audio, "Audio enable (0 = disable)");
135module_param_named(audio, radeon_audio, int, 0444); 139module_param_named(audio, radeon_audio, int, 0444);
136 140
141MODULE_PARM_DESC(disp_priority, "Display Priority (0 = auto, 1 = normal, 2 = high)");
142module_param_named(disp_priority, radeon_disp_priority, int, 0444);
143
144MODULE_PARM_DESC(hw_i2c, "hw i2c engine enable (0 = disable)");
145module_param_named(hw_i2c, radeon_hw_i2c, int, 0444);
146
137static int radeon_suspend(struct drm_device *dev, pm_message_t state) 147static int radeon_suspend(struct drm_device *dev, pm_message_t state)
138{ 148{
139 drm_radeon_private_t *dev_priv = dev->dev_private; 149 drm_radeon_private_t *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index ec55f2b23c22..448eba89d1e6 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -107,9 +107,10 @@
107 * 1.30- Add support for occlusion queries 107 * 1.30- Add support for occlusion queries
108 * 1.31- Add support for num Z pipes from GET_PARAM 108 * 1.31- Add support for num Z pipes from GET_PARAM
109 * 1.32- fixes for rv740 setup 109 * 1.32- fixes for rv740 setup
110 * 1.33- Add r6xx/r7xx const buffer support
110 */ 111 */
111#define DRIVER_MAJOR 1 112#define DRIVER_MAJOR 1
112#define DRIVER_MINOR 32 113#define DRIVER_MINOR 33
113#define DRIVER_PATCHLEVEL 0 114#define DRIVER_PATCHLEVEL 0
114 115
115enum radeon_cp_microcode_version { 116enum radeon_cp_microcode_version {
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index bc926ea0a530..c5ddaf58563a 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -254,6 +254,53 @@ radeon_get_atom_connector_priv_from_encoder(struct drm_encoder *encoder)
254 return dig_connector; 254 return dig_connector;
255} 255}
256 256
257void radeon_panel_mode_fixup(struct drm_encoder *encoder,
258 struct drm_display_mode *adjusted_mode)
259{
260 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
261 struct drm_device *dev = encoder->dev;
262 struct radeon_device *rdev = dev->dev_private;
263 struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
264 unsigned hblank = native_mode->htotal - native_mode->hdisplay;
265 unsigned vblank = native_mode->vtotal - native_mode->vdisplay;
266 unsigned hover = native_mode->hsync_start - native_mode->hdisplay;
267 unsigned vover = native_mode->vsync_start - native_mode->vdisplay;
268 unsigned hsync_width = native_mode->hsync_end - native_mode->hsync_start;
269 unsigned vsync_width = native_mode->vsync_end - native_mode->vsync_start;
270
271 adjusted_mode->clock = native_mode->clock;
272 adjusted_mode->flags = native_mode->flags;
273
274 if (ASIC_IS_AVIVO(rdev)) {
275 adjusted_mode->hdisplay = native_mode->hdisplay;
276 adjusted_mode->vdisplay = native_mode->vdisplay;
277 }
278
279 adjusted_mode->htotal = native_mode->hdisplay + hblank;
280 adjusted_mode->hsync_start = native_mode->hdisplay + hover;
281 adjusted_mode->hsync_end = adjusted_mode->hsync_start + hsync_width;
282
283 adjusted_mode->vtotal = native_mode->vdisplay + vblank;
284 adjusted_mode->vsync_start = native_mode->vdisplay + vover;
285 adjusted_mode->vsync_end = adjusted_mode->vsync_start + vsync_width;
286
287 drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
288
289 if (ASIC_IS_AVIVO(rdev)) {
290 adjusted_mode->crtc_hdisplay = native_mode->hdisplay;
291 adjusted_mode->crtc_vdisplay = native_mode->vdisplay;
292 }
293
294 adjusted_mode->crtc_htotal = adjusted_mode->crtc_hdisplay + hblank;
295 adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hdisplay + hover;
296 adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_start + hsync_width;
297
298 adjusted_mode->crtc_vtotal = adjusted_mode->crtc_vdisplay + vblank;
299 adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + vover;
300 adjusted_mode->crtc_vsync_end = adjusted_mode->crtc_vsync_start + vsync_width;
301
302}
303
257static bool radeon_atom_mode_fixup(struct drm_encoder *encoder, 304static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
258 struct drm_display_mode *mode, 305 struct drm_display_mode *mode,
259 struct drm_display_mode *adjusted_mode) 306 struct drm_display_mode *adjusted_mode)
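
As a quick illustration of the timing arithmetic in the new radeon_panel_mode_fixup() above: the blanking, front-porch and sync widths are measured on the panel's native mode and re-applied on top of the native hdisplay/vdisplay, so only the active size is forced while the native sync placement is preserved. A standalone sketch of the horizontal half of that calculation (the struct and the 1280x800 mode values are simplified assumptions, not the DRM types):

#include <stdio.h>

/* Standalone sketch, not part of the patch: a simplified mode struct and an
 * illustrative 1280x800 panel timing, not the real drm_display_mode. */
struct mode {
	int clock, hdisplay, hsync_start, hsync_end, htotal;
};

int main(void)
{
	struct mode native = { 71000, 1280, 1328, 1360, 1440 };
	struct mode adj = { 0 };

	int hblank = native.htotal - native.hdisplay;			/* 160 */
	int hover = native.hsync_start - native.hdisplay;		/* front porch: 48 */
	int hsync_width = native.hsync_end - native.hsync_start;	/* 32 */

	/* same recombination as the patch, horizontal axis only */
	adj.clock = native.clock;
	adj.hdisplay = native.hdisplay;
	adj.htotal = adj.hdisplay + hblank;
	adj.hsync_start = adj.hdisplay + hover;
	adj.hsync_end = adj.hsync_start + hsync_width;

	printf("hdisplay=%d htotal=%d hsync=[%d,%d]\n",
	       adj.hdisplay, adj.htotal, adj.hsync_start, adj.hsync_end);
	return 0;
}
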
@@ -275,18 +322,8 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
275 adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2; 322 adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
276 323
277 /* get the native mode for LVDS */ 324 /* get the native mode for LVDS */
278 if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) { 325 if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT))
279 struct drm_display_mode *native_mode = &radeon_encoder->native_mode; 326 radeon_panel_mode_fixup(encoder, adjusted_mode);
280 int mode_id = adjusted_mode->base.id;
281 *adjusted_mode = *native_mode;
282 if (!ASIC_IS_AVIVO(rdev)) {
283 adjusted_mode->hdisplay = mode->hdisplay;
284 adjusted_mode->vdisplay = mode->vdisplay;
285 adjusted_mode->crtc_hdisplay = mode->hdisplay;
286 adjusted_mode->crtc_vdisplay = mode->vdisplay;
287 }
288 adjusted_mode->base.id = mode_id;
289 }
290 327
291 /* get the native mode for TV */ 328 /* get the native mode for TV */
292 if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) { 329 if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) {
@@ -302,7 +339,7 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
302 } 339 }
303 340
304 if (ASIC_IS_DCE3(rdev) && 341 if (ASIC_IS_DCE3(rdev) &&
305 (radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT))) { 342 (radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT))) {
306 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); 343 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
307 radeon_dp_set_link_config(connector, mode); 344 radeon_dp_set_link_config(connector, mode);
308 } 345 }
@@ -317,12 +354,8 @@ atombios_dac_setup(struct drm_encoder *encoder, int action)
317 struct radeon_device *rdev = dev->dev_private; 354 struct radeon_device *rdev = dev->dev_private;
318 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 355 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
319 DAC_ENCODER_CONTROL_PS_ALLOCATION args; 356 DAC_ENCODER_CONTROL_PS_ALLOCATION args;
320 int index = 0, num = 0; 357 int index = 0;
321 struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv; 358 struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv;
322 enum radeon_tv_std tv_std = TV_STD_NTSC;
323
324 if (dac_info->tv_std)
325 tv_std = dac_info->tv_std;
326 359
327 memset(&args, 0, sizeof(args)); 360 memset(&args, 0, sizeof(args));
328 361
@@ -330,12 +363,10 @@ atombios_dac_setup(struct drm_encoder *encoder, int action)
330 case ENCODER_OBJECT_ID_INTERNAL_DAC1: 363 case ENCODER_OBJECT_ID_INTERNAL_DAC1:
331 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: 364 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
332 index = GetIndexIntoMasterTable(COMMAND, DAC1EncoderControl); 365 index = GetIndexIntoMasterTable(COMMAND, DAC1EncoderControl);
333 num = 1;
334 break; 366 break;
335 case ENCODER_OBJECT_ID_INTERNAL_DAC2: 367 case ENCODER_OBJECT_ID_INTERNAL_DAC2:
336 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: 368 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
337 index = GetIndexIntoMasterTable(COMMAND, DAC2EncoderControl); 369 index = GetIndexIntoMasterTable(COMMAND, DAC2EncoderControl);
338 num = 2;
339 break; 370 break;
340 } 371 }
341 372
@@ -346,7 +377,7 @@ atombios_dac_setup(struct drm_encoder *encoder, int action)
346 else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) 377 else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
347 args.ucDacStandard = ATOM_DAC1_CV; 378 args.ucDacStandard = ATOM_DAC1_CV;
348 else { 379 else {
349 switch (tv_std) { 380 switch (dac_info->tv_std) {
350 case TV_STD_PAL: 381 case TV_STD_PAL:
351 case TV_STD_PAL_M: 382 case TV_STD_PAL_M:
352 case TV_STD_SCART_PAL: 383 case TV_STD_SCART_PAL:
@@ -377,10 +408,6 @@ atombios_tv_setup(struct drm_encoder *encoder, int action)
377 TV_ENCODER_CONTROL_PS_ALLOCATION args; 408 TV_ENCODER_CONTROL_PS_ALLOCATION args;
378 int index = 0; 409 int index = 0;
379 struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv; 410 struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv;
380 enum radeon_tv_std tv_std = TV_STD_NTSC;
381
382 if (dac_info->tv_std)
383 tv_std = dac_info->tv_std;
384 411
385 memset(&args, 0, sizeof(args)); 412 memset(&args, 0, sizeof(args));
386 413
@@ -391,7 +418,7 @@ atombios_tv_setup(struct drm_encoder *encoder, int action)
391 if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) 418 if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
392 args.sTVEncoder.ucTvStandard = ATOM_TV_CV; 419 args.sTVEncoder.ucTvStandard = ATOM_TV_CV;
393 else { 420 else {
394 switch (tv_std) { 421 switch (dac_info->tv_std) {
395 case TV_STD_NTSC: 422 case TV_STD_NTSC:
396 args.sTVEncoder.ucTvStandard = ATOM_TV_NTSC; 423 args.sTVEncoder.ucTvStandard = ATOM_TV_NTSC;
397 break; 424 break;
@@ -519,7 +546,8 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
519 break; 546 break;
520 } 547 }
521 548
522 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev); 549 if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
550 return;
523 551
524 switch (frev) { 552 switch (frev) {
525 case 1: 553 case 1:
@@ -593,7 +621,6 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
593 } 621 }
594 622
595 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 623 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
596 r600_hdmi_enable(encoder, hdmi_detected);
597} 624}
598 625
599int 626int
@@ -708,7 +735,7 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
708 struct radeon_connector_atom_dig *dig_connector = 735 struct radeon_connector_atom_dig *dig_connector =
709 radeon_get_atom_connector_priv_from_encoder(encoder); 736 radeon_get_atom_connector_priv_from_encoder(encoder);
710 union dig_encoder_control args; 737 union dig_encoder_control args;
711 int index = 0, num = 0; 738 int index = 0;
712 uint8_t frev, crev; 739 uint8_t frev, crev;
713 740
714 if (!dig || !dig_connector) 741 if (!dig || !dig_connector)
@@ -724,9 +751,9 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
724 else 751 else
725 index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl); 752 index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
726 } 753 }
727 num = dig->dig_encoder + 1;
728 754
729 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev); 755 if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
756 return;
730 757
731 args.v1.ucAction = action; 758 args.v1.ucAction = action;
732 args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); 759 args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
@@ -785,7 +812,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
785 struct drm_connector *connector; 812 struct drm_connector *connector;
786 struct radeon_connector *radeon_connector; 813 struct radeon_connector *radeon_connector;
787 union dig_transmitter_control args; 814 union dig_transmitter_control args;
788 int index = 0, num = 0; 815 int index = 0;
789 uint8_t frev, crev; 816 uint8_t frev, crev;
790 bool is_dp = false; 817 bool is_dp = false;
791 int pll_id = 0; 818 int pll_id = 0;
@@ -814,7 +841,8 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
814 } 841 }
815 } 842 }
816 843
817 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev); 844 if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
845 return;
818 846
819 args.v1.ucAction = action; 847 args.v1.ucAction = action;
820 if (action == ATOM_TRANSMITTER_ACTION_INIT) { 848 if (action == ATOM_TRANSMITTER_ACTION_INIT) {
@@ -860,15 +888,12 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
860 switch (radeon_encoder->encoder_id) { 888 switch (radeon_encoder->encoder_id) {
861 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 889 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
862 args.v3.acConfig.ucTransmitterSel = 0; 890 args.v3.acConfig.ucTransmitterSel = 0;
863 num = 0;
864 break; 891 break;
865 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: 892 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
866 args.v3.acConfig.ucTransmitterSel = 1; 893 args.v3.acConfig.ucTransmitterSel = 1;
867 num = 1;
868 break; 894 break;
869 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: 895 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
870 args.v3.acConfig.ucTransmitterSel = 2; 896 args.v3.acConfig.ucTransmitterSel = 2;
871 num = 2;
872 break; 897 break;
873 } 898 }
874 899
@@ -877,25 +902,23 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
877 else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { 902 else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
878 if (dig->coherent_mode) 903 if (dig->coherent_mode)
879 args.v3.acConfig.fCoherentMode = 1; 904 args.v3.acConfig.fCoherentMode = 1;
905 if (radeon_encoder->pixel_clock > 165000)
906 args.v3.acConfig.fDualLinkConnector = 1;
880 } 907 }
881 } else if (ASIC_IS_DCE32(rdev)) { 908 } else if (ASIC_IS_DCE32(rdev)) {
882 if (dig->dig_encoder == 1) 909 args.v2.acConfig.ucEncoderSel = dig->dig_encoder;
883 args.v2.acConfig.ucEncoderSel = 1;
884 if (dig_connector->linkb) 910 if (dig_connector->linkb)
885 args.v2.acConfig.ucLinkSel = 1; 911 args.v2.acConfig.ucLinkSel = 1;
886 912
887 switch (radeon_encoder->encoder_id) { 913 switch (radeon_encoder->encoder_id) {
888 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 914 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
889 args.v2.acConfig.ucTransmitterSel = 0; 915 args.v2.acConfig.ucTransmitterSel = 0;
890 num = 0;
891 break; 916 break;
892 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: 917 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
893 args.v2.acConfig.ucTransmitterSel = 1; 918 args.v2.acConfig.ucTransmitterSel = 1;
894 num = 1;
895 break; 919 break;
896 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: 920 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
897 args.v2.acConfig.ucTransmitterSel = 2; 921 args.v2.acConfig.ucTransmitterSel = 2;
898 num = 2;
899 break; 922 break;
900 } 923 }
901 924
@@ -904,6 +927,8 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
904 else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { 927 else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
905 if (dig->coherent_mode) 928 if (dig->coherent_mode)
906 args.v2.acConfig.fCoherentMode = 1; 929 args.v2.acConfig.fCoherentMode = 1;
930 if (radeon_encoder->pixel_clock > 165000)
931 args.v2.acConfig.fDualLinkConnector = 1;
907 } 932 }
908 } else { 933 } else {
909 args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL; 934 args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL;
@@ -913,31 +938,25 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
913 else 938 else
914 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER; 939 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
915 940
916 switch (radeon_encoder->encoder_id) { 941 if ((rdev->flags & RADEON_IS_IGP) &&
917 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 942 (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY)) {
918 if (rdev->flags & RADEON_IS_IGP) { 943 if (is_dp || (radeon_encoder->pixel_clock <= 165000)) {
919 if (radeon_encoder->pixel_clock > 165000) { 944 if (dig_connector->igp_lane_info & 0x1)
920 if (dig_connector->igp_lane_info & 0x3) 945 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3;
921 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7; 946 else if (dig_connector->igp_lane_info & 0x2)
922 else if (dig_connector->igp_lane_info & 0xc) 947 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7;
923 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15; 948 else if (dig_connector->igp_lane_info & 0x4)
924 } else { 949 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11;
925 if (dig_connector->igp_lane_info & 0x1) 950 else if (dig_connector->igp_lane_info & 0x8)
926 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3; 951 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15;
927 else if (dig_connector->igp_lane_info & 0x2) 952 } else {
928 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7; 953 if (dig_connector->igp_lane_info & 0x3)
929 else if (dig_connector->igp_lane_info & 0x4) 954 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7;
930 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11; 955 else if (dig_connector->igp_lane_info & 0xc)
931 else if (dig_connector->igp_lane_info & 0x8) 956 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15;
932 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15;
933 }
934 } 957 }
935 break;
936 } 958 }
937 959
938 if (radeon_encoder->pixel_clock > 165000)
939 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_8LANE_LINK;
940
941 if (dig_connector->linkb) 960 if (dig_connector->linkb)
942 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB; 961 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB;
943 else 962 else
@@ -948,6 +967,8 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
948 else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { 967 else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
949 if (dig->coherent_mode) 968 if (dig->coherent_mode)
950 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT; 969 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
970 if (radeon_encoder->pixel_clock > 165000)
971 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_8LANE_LINK;
951 } 972 }
952 } 973 }
953 974
@@ -1054,16 +1075,25 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
1054 if (is_dig) { 1075 if (is_dig) {
1055 switch (mode) { 1076 switch (mode) {
1056 case DRM_MODE_DPMS_ON: 1077 case DRM_MODE_DPMS_ON:
1057 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); 1078 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
1058 {
1059 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); 1079 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
1080
1060 dp_link_train(encoder, connector); 1081 dp_link_train(encoder, connector);
1082 if (ASIC_IS_DCE4(rdev))
1083 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON);
1061 } 1084 }
1085 if (!ASIC_IS_DCE4(rdev))
1086 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
1062 break; 1087 break;
1063 case DRM_MODE_DPMS_STANDBY: 1088 case DRM_MODE_DPMS_STANDBY:
1064 case DRM_MODE_DPMS_SUSPEND: 1089 case DRM_MODE_DPMS_SUSPEND:
1065 case DRM_MODE_DPMS_OFF: 1090 case DRM_MODE_DPMS_OFF:
1066 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0); 1091 if (!ASIC_IS_DCE4(rdev))
1092 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
1093 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
1094 if (ASIC_IS_DCE4(rdev))
1095 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF);
1096 }
1067 break; 1097 break;
1068 } 1098 }
1069 } else { 1099 } else {
@@ -1104,7 +1134,8 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
1104 1134
1105 memset(&args, 0, sizeof(args)); 1135 memset(&args, 0, sizeof(args));
1106 1136
1107 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev); 1137 if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
1138 return;
1108 1139
1109 switch (frev) { 1140 switch (frev) {
1110 case 1: 1141 case 1:
@@ -1216,6 +1247,9 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
1216 } 1247 }
1217 1248
1218 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 1249 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
1250
1251 /* update scratch regs with new routing */
1252 radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
1219} 1253}
1220 1254
1221static void 1255static void
@@ -1326,20 +1360,10 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
1326 struct drm_device *dev = encoder->dev; 1360 struct drm_device *dev = encoder->dev;
1327 struct radeon_device *rdev = dev->dev_private; 1361 struct radeon_device *rdev = dev->dev_private;
1328 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 1362 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1329 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
1330 1363
1331 if (radeon_encoder->active_device &
1332 (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) {
1333 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
1334 if (dig)
1335 dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder);
1336 }
1337 radeon_encoder->pixel_clock = adjusted_mode->clock; 1364 radeon_encoder->pixel_clock = adjusted_mode->clock;
1338 1365
1339 radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); 1366 if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE4(rdev)) {
1340 atombios_set_encoder_crtc_source(encoder);
1341
1342 if (ASIC_IS_AVIVO(rdev)) {
1343 if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT)) 1367 if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT))
1344 atombios_yuv_setup(encoder, true); 1368 atombios_yuv_setup(encoder, true);
1345 else 1369 else
@@ -1390,15 +1414,20 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
1390 case ENCODER_OBJECT_ID_INTERNAL_DAC2: 1414 case ENCODER_OBJECT_ID_INTERNAL_DAC2:
1391 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: 1415 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
1392 atombios_dac_setup(encoder, ATOM_ENABLE); 1416 atombios_dac_setup(encoder, ATOM_ENABLE);
1393 if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) 1417 if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) {
1394 atombios_tv_setup(encoder, ATOM_ENABLE); 1418 if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
1419 atombios_tv_setup(encoder, ATOM_ENABLE);
1420 else
1421 atombios_tv_setup(encoder, ATOM_DISABLE);
1422 }
1395 break; 1423 break;
1396 } 1424 }
1397 atombios_apply_encoder_quirks(encoder, adjusted_mode); 1425 atombios_apply_encoder_quirks(encoder, adjusted_mode);
1398 1426
1399 /* XXX */ 1427 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
1400 if (!ASIC_IS_DCE4(rdev)) 1428 r600_hdmi_enable(encoder);
1401 r600_hdmi_setmode(encoder, adjusted_mode); 1429 r600_hdmi_setmode(encoder, adjusted_mode);
1430 }
1402} 1431}
1403 1432
1404static bool 1433static bool
@@ -1418,7 +1447,8 @@ atombios_dac_load_detect(struct drm_encoder *encoder, struct drm_connector *conn
1418 1447
1419 memset(&args, 0, sizeof(args)); 1448 memset(&args, 0, sizeof(args));
1420 1449
1421 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev); 1450 if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
1451 return false;
1422 1452
1423 args.sDacload.ucMisc = 0; 1453 args.sDacload.ucMisc = 0;
1424 1454
@@ -1492,8 +1522,20 @@ radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connec
1492 1522
1493static void radeon_atom_encoder_prepare(struct drm_encoder *encoder) 1523static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
1494{ 1524{
1525 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1526
1527 if (radeon_encoder->active_device &
1528 (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) {
1529 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
1530 if (dig)
1531 dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder);
1532 }
1533
1495 radeon_atom_output_lock(encoder, true); 1534 radeon_atom_output_lock(encoder, true);
1496 radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); 1535 radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
1536
1537 /* this is needed for the pll/ss setup to work correctly in some cases */
1538 atombios_set_encoder_crtc_source(encoder);
1497} 1539}
1498 1540
1499static void radeon_atom_encoder_commit(struct drm_encoder *encoder) 1541static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
@@ -1509,6 +1551,8 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
1509 radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); 1551 radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
1510 1552
1511 if (radeon_encoder_is_digital(encoder)) { 1553 if (radeon_encoder_is_digital(encoder)) {
1554 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
1555 r600_hdmi_disable(encoder);
1512 dig = radeon_encoder->enc_priv; 1556 dig = radeon_encoder->enc_priv;
1513 dig->dig_encoder = -1; 1557 dig->dig_encoder = -1;
1514 } 1558 }
@@ -1549,12 +1593,14 @@ static const struct drm_encoder_funcs radeon_atom_enc_funcs = {
1549struct radeon_encoder_atom_dac * 1593struct radeon_encoder_atom_dac *
1550radeon_atombios_set_dac_info(struct radeon_encoder *radeon_encoder) 1594radeon_atombios_set_dac_info(struct radeon_encoder *radeon_encoder)
1551{ 1595{
1596 struct drm_device *dev = radeon_encoder->base.dev;
1597 struct radeon_device *rdev = dev->dev_private;
1552 struct radeon_encoder_atom_dac *dac = kzalloc(sizeof(struct radeon_encoder_atom_dac), GFP_KERNEL); 1598 struct radeon_encoder_atom_dac *dac = kzalloc(sizeof(struct radeon_encoder_atom_dac), GFP_KERNEL);
1553 1599
1554 if (!dac) 1600 if (!dac)
1555 return NULL; 1601 return NULL;
1556 1602
1557 dac->tv_std = TV_STD_NTSC; 1603 dac->tv_std = radeon_atombios_get_tv_info(rdev);
1558 return dac; 1604 return dac;
1559} 1605}
1560 1606
@@ -1632,6 +1678,7 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su
1632 break; 1678 break;
1633 case ENCODER_OBJECT_ID_INTERNAL_DAC1: 1679 case ENCODER_OBJECT_ID_INTERNAL_DAC1:
1634 drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC); 1680 drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
1681 radeon_encoder->enc_priv = radeon_atombios_set_dac_info(radeon_encoder);
1635 drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs); 1682 drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs);
1636 break; 1683 break;
1637 case ENCODER_OBJECT_ID_INTERNAL_DAC2: 1684 case ENCODER_OBJECT_ID_INTERNAL_DAC2:
@@ -1659,6 +1706,4 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su
1659 drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs); 1706 drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
1660 break; 1707 break;
1661 } 1708 }
1662
1663 r600_hdmi_init(encoder);
1664} 1709}
diff --git a/drivers/gpu/drm/radeon/radeon_family.h b/drivers/gpu/drm/radeon/radeon_family.h
index 93c7d5d41914..e329066dcabd 100644
--- a/drivers/gpu/drm/radeon/radeon_family.h
+++ b/drivers/gpu/drm/radeon/radeon_family.h
@@ -36,7 +36,7 @@
36 * Radeon chip families 36 * Radeon chip families
37 */ 37 */
38enum radeon_family { 38enum radeon_family {
39 CHIP_R100, 39 CHIP_R100 = 0,
40 CHIP_RV100, 40 CHIP_RV100,
41 CHIP_RS100, 41 CHIP_RS100,
42 CHIP_RV200, 42 CHIP_RV200,
@@ -99,4 +99,5 @@ enum radeon_chip_flags {
99 RADEON_IS_PCI = 0x00800000UL, 99 RADEON_IS_PCI = 0x00800000UL,
100 RADEON_IS_IGPGART = 0x01000000UL, 100 RADEON_IS_IGPGART = 0x01000000UL,
101}; 101};
102
102#endif 103#endif
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 8fccbf29235e..9ac57a09784b 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -28,6 +28,7 @@
28 */ 28 */
29 29
30#include <linux/module.h> 30#include <linux/module.h>
31#include <linux/slab.h>
31#include <linux/fb.h> 32#include <linux/fb.h>
32 33
33#include "drmP.h" 34#include "drmP.h"
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 8495d4e32e18..d90f95b405c5 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -33,6 +33,7 @@
33#include <linux/wait.h> 33#include <linux/wait.h>
34#include <linux/list.h> 34#include <linux/list.h>
35#include <linux/kref.h> 35#include <linux/kref.h>
36#include <linux/slab.h>
36#include "drmP.h" 37#include "drmP.h"
37#include "drm.h" 38#include "drm.h"
38#include "radeon_reg.h" 39#include "radeon_reg.h"
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index 4ae50c19589f..5def6f5dff38 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -59,6 +59,7 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
59 return false; 59 return false;
60} 60}
61 61
62/* bit banging i2c */
62 63
63static void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state) 64static void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state)
64{ 65{
@@ -181,13 +182,30 @@ static void set_data(void *i2c_priv, int data)
181 WREG32(rec->en_data_reg, val); 182 WREG32(rec->en_data_reg, val);
182} 183}
183 184
185static int pre_xfer(struct i2c_adapter *i2c_adap)
186{
187 struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
188
189 radeon_i2c_do_lock(i2c, 1);
190
191 return 0;
192}
193
194static void post_xfer(struct i2c_adapter *i2c_adap)
195{
196 struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
197
198 radeon_i2c_do_lock(i2c, 0);
199}
200
201/* hw i2c */
202
184static u32 radeon_get_i2c_prescale(struct radeon_device *rdev) 203static u32 radeon_get_i2c_prescale(struct radeon_device *rdev)
185{ 204{
186 struct radeon_pll *spll = &rdev->clock.spll;
187 u32 sclk = radeon_get_engine_clock(rdev); 205 u32 sclk = radeon_get_engine_clock(rdev);
188 u32 prescale = 0; 206 u32 prescale = 0;
189 u32 n, m; 207 u32 nm;
190 u8 loop; 208 u8 n, m, loop;
191 int i2c_clock; 209 int i2c_clock;
192 210
193 switch (rdev->family) { 211 switch (rdev->family) {
@@ -203,13 +221,15 @@ static u32 radeon_get_i2c_prescale(struct radeon_device *rdev)
203 case CHIP_R300: 221 case CHIP_R300:
204 case CHIP_R350: 222 case CHIP_R350:
205 case CHIP_RV350: 223 case CHIP_RV350:
206 n = (spll->reference_freq) / (4 * 6); 224 i2c_clock = 60;
225 nm = (sclk * 10) / (i2c_clock * 4);
207 for (loop = 1; loop < 255; loop++) { 226 for (loop = 1; loop < 255; loop++) {
208 if ((loop * (loop - 1)) > n) 227 if ((nm / loop) < loop)
209 break; 228 break;
210 } 229 }
211 m = loop - 1; 230 n = loop - 1;
212 prescale = m | (loop << 8); 231 m = loop - 2;
232 prescale = m | (n << 8);
213 break; 233 break;
214 case CHIP_RV380: 234 case CHIP_RV380:
215 case CHIP_RS400: 235 case CHIP_RS400:
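
The reworked R300/R350/RV350 branch above derives the two prescale divider bytes directly from the engine clock: nm is the total divide ratio and the loop stops at roughly its integer square root before packing n and m. A standalone sketch of that calculation (the example clock value is an assumption; units follow the hunk):

#include <stdint.h>
#include <stdio.h>

/* Standalone sketch, not part of the patch: the engine clock below is just an
 * example value; units follow the hunk (10 kHz steps, i2c_clock of 60). */
int main(void)
{
	uint32_t sclk = 30000;			/* 300 MHz engine clock in 10 kHz units */
	uint32_t i2c_clock = 60;
	uint32_t nm = (sclk * 10) / (i2c_clock * 4);
	uint8_t loop, n, m;

	/* the loop stops at roughly the integer square root of nm, as above */
	for (loop = 1; loop < 255; loop++)
		if ((nm / loop) < loop)
			break;

	n = loop - 1;
	m = loop - 2;
	printf("nm=%u n=%u m=%u prescale=0x%04x\n",
	       nm, (unsigned)n, (unsigned)m, (unsigned)(m | (n << 8)));
	return 0;
}
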
@@ -217,7 +237,6 @@ static u32 radeon_get_i2c_prescale(struct radeon_device *rdev)
217 case CHIP_R420: 237 case CHIP_R420:
218 case CHIP_R423: 238 case CHIP_R423:
219 case CHIP_RV410: 239 case CHIP_RV410:
220 sclk = radeon_get_engine_clock(rdev);
221 prescale = (((sclk * 10)/(4 * 128 * 100) + 1) << 8) + 128; 240 prescale = (((sclk * 10)/(4 * 128 * 100) + 1) << 8) + 128;
222 break; 241 break;
223 case CHIP_RS600: 242 case CHIP_RS600:
@@ -232,7 +251,6 @@ static u32 radeon_get_i2c_prescale(struct radeon_device *rdev)
232 case CHIP_RV570: 251 case CHIP_RV570:
233 case CHIP_R580: 252 case CHIP_R580:
234 i2c_clock = 50; 253 i2c_clock = 50;
235 sclk = radeon_get_engine_clock(rdev);
236 if (rdev->family == CHIP_R520) 254 if (rdev->family == CHIP_R520)
237 prescale = (127 << 8) + ((sclk * 10) / (4 * 127 * i2c_clock)); 255 prescale = (127 << 8) + ((sclk * 10) / (4 * 127 * i2c_clock));
238 else 256 else
@@ -291,6 +309,7 @@ static int r100_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
291 prescale = radeon_get_i2c_prescale(rdev); 309 prescale = radeon_get_i2c_prescale(rdev);
292 310
293 reg = ((prescale << RADEON_I2C_PRESCALE_SHIFT) | 311 reg = ((prescale << RADEON_I2C_PRESCALE_SHIFT) |
312 RADEON_I2C_DRIVE_EN |
294 RADEON_I2C_START | 313 RADEON_I2C_START |
295 RADEON_I2C_STOP | 314 RADEON_I2C_STOP |
296 RADEON_I2C_GO); 315 RADEON_I2C_GO);
@@ -757,26 +776,13 @@ done:
757 return ret; 776 return ret;
758} 777}
759 778
760static int radeon_sw_i2c_xfer(struct i2c_adapter *i2c_adap, 779static int radeon_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
761 struct i2c_msg *msgs, int num) 780 struct i2c_msg *msgs, int num)
762{ 781{
763 struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap); 782 struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
764 int ret;
765
766 radeon_i2c_do_lock(i2c, 1);
767 ret = i2c_transfer(&i2c->algo.radeon.bit_adapter, msgs, num);
768 radeon_i2c_do_lock(i2c, 0);
769
770 return ret;
771}
772
773static int radeon_i2c_xfer(struct i2c_adapter *i2c_adap,
774 struct i2c_msg *msgs, int num)
775{
776 struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
777 struct radeon_device *rdev = i2c->dev->dev_private; 783 struct radeon_device *rdev = i2c->dev->dev_private;
778 struct radeon_i2c_bus_rec *rec = &i2c->rec; 784 struct radeon_i2c_bus_rec *rec = &i2c->rec;
779 int ret; 785 int ret = 0;
780 786
781 switch (rdev->family) { 787 switch (rdev->family) {
782 case CHIP_R100: 788 case CHIP_R100:
@@ -797,16 +803,12 @@ static int radeon_i2c_xfer(struct i2c_adapter *i2c_adap,
797 case CHIP_RV410: 803 case CHIP_RV410:
798 case CHIP_RS400: 804 case CHIP_RS400:
799 case CHIP_RS480: 805 case CHIP_RS480:
800 if (rec->hw_capable) 806 ret = r100_hw_i2c_xfer(i2c_adap, msgs, num);
801 ret = r100_hw_i2c_xfer(i2c_adap, msgs, num);
802 else
803 ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
804 break; 807 break;
805 case CHIP_RS600: 808 case CHIP_RS600:
806 case CHIP_RS690: 809 case CHIP_RS690:
807 case CHIP_RS740: 810 case CHIP_RS740:
808 /* XXX fill in hw i2c implementation */ 811 /* XXX fill in hw i2c implementation */
809 ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
810 break; 812 break;
811 case CHIP_RV515: 813 case CHIP_RV515:
812 case CHIP_R520: 814 case CHIP_R520:
@@ -814,20 +816,16 @@ static int radeon_i2c_xfer(struct i2c_adapter *i2c_adap,
814 case CHIP_RV560: 816 case CHIP_RV560:
815 case CHIP_RV570: 817 case CHIP_RV570:
816 case CHIP_R580: 818 case CHIP_R580:
817 if (rec->hw_capable) { 819 if (rec->mm_i2c)
818 if (rec->mm_i2c) 820 ret = r100_hw_i2c_xfer(i2c_adap, msgs, num);
819 ret = r100_hw_i2c_xfer(i2c_adap, msgs, num); 821 else
820 else 822 ret = r500_hw_i2c_xfer(i2c_adap, msgs, num);
821 ret = r500_hw_i2c_xfer(i2c_adap, msgs, num);
822 } else
823 ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
824 break; 823 break;
825 case CHIP_R600: 824 case CHIP_R600:
826 case CHIP_RV610: 825 case CHIP_RV610:
827 case CHIP_RV630: 826 case CHIP_RV630:
828 case CHIP_RV670: 827 case CHIP_RV670:
829 /* XXX fill in hw i2c implementation */ 828 /* XXX fill in hw i2c implementation */
830 ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
831 break; 829 break;
832 case CHIP_RV620: 830 case CHIP_RV620:
833 case CHIP_RV635: 831 case CHIP_RV635:
@@ -838,7 +836,6 @@ static int radeon_i2c_xfer(struct i2c_adapter *i2c_adap,
838 case CHIP_RV710: 836 case CHIP_RV710:
839 case CHIP_RV740: 837 case CHIP_RV740:
840 /* XXX fill in hw i2c implementation */ 838 /* XXX fill in hw i2c implementation */
841 ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
842 break; 839 break;
843 case CHIP_CEDAR: 840 case CHIP_CEDAR:
844 case CHIP_REDWOOD: 841 case CHIP_REDWOOD:
@@ -846,7 +843,6 @@ static int radeon_i2c_xfer(struct i2c_adapter *i2c_adap,
846 case CHIP_CYPRESS: 843 case CHIP_CYPRESS:
847 case CHIP_HEMLOCK: 844 case CHIP_HEMLOCK:
848 /* XXX fill in hw i2c implementation */ 845 /* XXX fill in hw i2c implementation */
849 ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
850 break; 846 break;
851 default: 847 default:
852 DRM_ERROR("i2c: unhandled radeon chip\n"); 848 DRM_ERROR("i2c: unhandled radeon chip\n");
@@ -857,20 +853,21 @@ static int radeon_i2c_xfer(struct i2c_adapter *i2c_adap,
857 return ret; 853 return ret;
858} 854}
859 855
860static u32 radeon_i2c_func(struct i2c_adapter *adap) 856static u32 radeon_hw_i2c_func(struct i2c_adapter *adap)
861{ 857{
862 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; 858 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
863} 859}
864 860
865static const struct i2c_algorithm radeon_i2c_algo = { 861static const struct i2c_algorithm radeon_i2c_algo = {
866 .master_xfer = radeon_i2c_xfer, 862 .master_xfer = radeon_hw_i2c_xfer,
867 .functionality = radeon_i2c_func, 863 .functionality = radeon_hw_i2c_func,
868}; 864};
869 865
870struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, 866struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
871 struct radeon_i2c_bus_rec *rec, 867 struct radeon_i2c_bus_rec *rec,
872 const char *name) 868 const char *name)
873{ 869{
870 struct radeon_device *rdev = dev->dev_private;
874 struct radeon_i2c_chan *i2c; 871 struct radeon_i2c_chan *i2c;
875 int ret; 872 int ret;
876 873
@@ -878,37 +875,43 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
878 if (i2c == NULL) 875 if (i2c == NULL)
879 return NULL; 876 return NULL;
880 877
881 /* set the internal bit adapter */
882 i2c->algo.radeon.bit_adapter.owner = THIS_MODULE;
883 i2c_set_adapdata(&i2c->algo.radeon.bit_adapter, i2c);
884 sprintf(i2c->algo.radeon.bit_adapter.name, "Radeon internal i2c bit bus %s", name);
885 i2c->algo.radeon.bit_adapter.algo_data = &i2c->algo.radeon.bit_data;
886 i2c->algo.radeon.bit_data.setsda = set_data;
887 i2c->algo.radeon.bit_data.setscl = set_clock;
888 i2c->algo.radeon.bit_data.getsda = get_data;
889 i2c->algo.radeon.bit_data.getscl = get_clock;
890 i2c->algo.radeon.bit_data.udelay = 20;
891 /* vesa says 2.2 ms is enough, 1 jiffy doesn't seem to always
892 * make this, 2 jiffies is a lot more reliable */
893 i2c->algo.radeon.bit_data.timeout = 2;
894 i2c->algo.radeon.bit_data.data = i2c;
895 ret = i2c_bit_add_bus(&i2c->algo.radeon.bit_adapter);
896 if (ret) {
897 DRM_ERROR("Failed to register internal bit i2c %s\n", name);
898 goto out_free;
899 }
900 /* set the radeon i2c adapter */
901 i2c->dev = dev;
902 i2c->rec = *rec; 878 i2c->rec = *rec;
903 i2c->adapter.owner = THIS_MODULE; 879 i2c->adapter.owner = THIS_MODULE;
880 i2c->dev = dev;
904 i2c_set_adapdata(&i2c->adapter, i2c); 881 i2c_set_adapdata(&i2c->adapter, i2c);
905 sprintf(i2c->adapter.name, "Radeon i2c %s", name); 882 if (rec->mm_i2c ||
906 i2c->adapter.algo_data = &i2c->algo.radeon; 883 (rec->hw_capable &&
907 i2c->adapter.algo = &radeon_i2c_algo; 884 radeon_hw_i2c &&
908 ret = i2c_add_adapter(&i2c->adapter); 885 ((rdev->family <= CHIP_RS480) ||
909 if (ret) { 886 ((rdev->family >= CHIP_RV515) && (rdev->family <= CHIP_R580))))) {
910 DRM_ERROR("Failed to register i2c %s\n", name); 887 /* set the radeon hw i2c adapter */
911 goto out_free; 888 sprintf(i2c->adapter.name, "Radeon i2c hw bus %s", name);
889 i2c->adapter.algo = &radeon_i2c_algo;
890 ret = i2c_add_adapter(&i2c->adapter);
891 if (ret) {
892 DRM_ERROR("Failed to register hw i2c %s\n", name);
893 goto out_free;
894 }
895 } else {
896 /* set the radeon bit adapter */
897 sprintf(i2c->adapter.name, "Radeon i2c bit bus %s", name);
898 i2c->adapter.algo_data = &i2c->algo.bit;
899 i2c->algo.bit.pre_xfer = pre_xfer;
900 i2c->algo.bit.post_xfer = post_xfer;
901 i2c->algo.bit.setsda = set_data;
902 i2c->algo.bit.setscl = set_clock;
903 i2c->algo.bit.getsda = get_data;
904 i2c->algo.bit.getscl = get_clock;
905 i2c->algo.bit.udelay = 20;
906 /* vesa says 2.2 ms is enough, 1 jiffy doesn't seem to always
907 * make this, 2 jiffies is a lot more reliable */
908 i2c->algo.bit.timeout = 2;
909 i2c->algo.bit.data = i2c;
910 ret = i2c_bit_add_bus(&i2c->adapter);
911 if (ret) {
912 DRM_ERROR("Failed to register bit i2c %s\n", name);
913 goto out_free;
914 }
912 } 915 }
913 916
914 return i2c; 917 return i2c;
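
To summarise the rewritten radeon_i2c_create() above: the hardware engine is only registered for the dedicated mm_i2c bus, or when the bus is hw-capable, the new radeon_hw_i2c parameter (default 0, per the radeon_drv.c hunk) is set, and the family is one that actually has a hw xfer path; everything else falls back to the bit-banging algorithm with pre_xfer()/post_xfer() doing the bus locking. A hypothetical condensation of that decision (family values below are placeholders, only their ordering matters):

#include <stdbool.h>
#include <stdio.h>

/* Standalone sketch, not part of the patch: family values are placeholders,
 * only their ordering relative to each other matters here. */
enum family { CHIP_RS480 = 17, CHIP_RV515 = 21, CHIP_R580 = 26, CHIP_R600 = 27 };

struct bus_rec {
	bool mm_i2c, hw_capable;
};

/* Hypothetical condensation of the adapter choice in radeon_i2c_create():
 * the hw engine is used for the dedicated mm_i2c bus, or when the bus is
 * hw-capable, radeon_hw_i2c is set and the family has a hw xfer path. */
static bool use_hw_i2c(const struct bus_rec *rec, enum family fam, int radeon_hw_i2c)
{
	return rec->mm_i2c ||
	       (rec->hw_capable && radeon_hw_i2c &&
		(fam <= CHIP_RS480 ||
		 (fam >= CHIP_RV515 && fam <= CHIP_R580)));
}

int main(void)
{
	struct bus_rec rec = { .mm_i2c = false, .hw_capable = true };

	printf("R580 with hw_i2c=1: %s\n", use_hw_i2c(&rec, CHIP_R580, 1) ? "hw" : "bit-bang");
	printf("R600 with hw_i2c=1: %s\n", use_hw_i2c(&rec, CHIP_R600, 1) ? "hw" : "bit-bang");
	return 0;
}
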
@@ -953,16 +956,6 @@ void radeon_i2c_destroy(struct radeon_i2c_chan *i2c)
953{ 956{
954 if (!i2c) 957 if (!i2c)
955 return; 958 return;
956 i2c_del_adapter(&i2c->algo.radeon.bit_adapter);
957 i2c_del_adapter(&i2c->adapter);
958 kfree(i2c);
959}
960
961void radeon_i2c_destroy_dp(struct radeon_i2c_chan *i2c)
962{
963 if (!i2c)
964 return;
965
966 i2c_del_adapter(&i2c->adapter); 959 i2c_del_adapter(&i2c->adapter);
967 kfree(i2c); 960 kfree(i2c);
968} 961}
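[Note on the radeon_i2c_create() rework above] The separate internal "bit adapter" is folded into the one i2c_adapter: buses wired to the hardware i2c engine (mm_i2c, or hw_capable parts up to RS480 and RV515–R580 when radeon_hw_i2c is set) keep radeon_i2c_algo, everything else is registered through the kernel's i2c-algo-bit helper with a 20 us half-clock and a 2-jiffy transfer timeout (VESA DDC budgets 2.2 ms, which a single jiffy does not always cover). A minimal sketch of that bit-banging registration pattern using only the stock i2c-algo-bit API; the my_bus type and gpio_* callbacks are illustrative stand-ins, not part of this patch:

#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/module.h>

struct my_bus {
	struct i2c_adapter adapter;
	struct i2c_algo_bit_data bit;
	/* GPIO register state for the bus would live here. */
};

/* Illustrative GPIO callbacks: drive/sample the SDA and SCL lines. */
static void gpio_setsda(void *data, int state) { /* program SDA */ }
static void gpio_setscl(void *data, int state) { /* program SCL */ }
static int  gpio_getsda(void *data) { return 1; /* sample SDA */ }
static int  gpio_getscl(void *data) { return 1; /* sample SCL */ }

static int my_bus_register(struct my_bus *bus)
{
	bus->adapter.owner = THIS_MODULE;
	snprintf(bus->adapter.name, sizeof(bus->adapter.name), "example bit bus");
	i2c_set_adapdata(&bus->adapter, bus);

	/* i2c-algo-bit implements the wire protocol; we only supply line accessors. */
	bus->adapter.algo_data = &bus->bit;
	bus->bit.setsda = gpio_setsda;
	bus->bit.setscl = gpio_setscl;
	bus->bit.getsda = gpio_getsda;
	bus->bit.getscl = gpio_getscl;
	bus->bit.udelay = 20;	/* half-clock in us: roughly 25 kHz SCL */
	bus->bit.timeout = 2;	/* jiffies; comfortably covers the 2.2 ms VESA allows */
	bus->bit.data = bus;

	/* Registers bus->adapter with the i2c core using the bit-banging algorithm. */
	return i2c_bit_add_bus(&bus->adapter);
}

Because only one adapter is ever registered per bus, teardown becomes a single i2c_del_adapter(), which is what lets the duplicate radeon_i2c_destroy_dp() below be removed.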
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index ea4c645ece11..a212041e8b0b 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -67,9 +67,10 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
67 67
68 /* Disable *all* interrupts */ 68 /* Disable *all* interrupts */
69 rdev->irq.sw_int = false; 69 rdev->irq.sw_int = false;
70 for (i = 0; i < 2; i++) { 70 for (i = 0; i < rdev->num_crtc; i++)
71 rdev->irq.crtc_vblank_int[i] = false; 71 rdev->irq.crtc_vblank_int[i] = false;
72 } 72 for (i = 0; i < 6; i++)
73 rdev->irq.hpd[i] = false;
73 radeon_irq_set(rdev); 74 radeon_irq_set(rdev);
74 /* Clear bits */ 75 /* Clear bits */
75 radeon_irq_process(rdev); 76 radeon_irq_process(rdev);
@@ -95,28 +96,29 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
95 } 96 }
96 /* Disable *all* interrupts */ 97 /* Disable *all* interrupts */
97 rdev->irq.sw_int = false; 98 rdev->irq.sw_int = false;
98 for (i = 0; i < 2; i++) { 99 for (i = 0; i < rdev->num_crtc; i++)
99 rdev->irq.crtc_vblank_int[i] = false; 100 rdev->irq.crtc_vblank_int[i] = false;
101 for (i = 0; i < 6; i++)
100 rdev->irq.hpd[i] = false; 102 rdev->irq.hpd[i] = false;
101 }
102 radeon_irq_set(rdev); 103 radeon_irq_set(rdev);
103} 104}
104 105
105int radeon_irq_kms_init(struct radeon_device *rdev) 106int radeon_irq_kms_init(struct radeon_device *rdev)
106{ 107{
107 int r = 0; 108 int r = 0;
108 int num_crtc = 2;
109 109
110 if (rdev->flags & RADEON_SINGLE_CRTC)
111 num_crtc = 1;
112 spin_lock_init(&rdev->irq.sw_lock); 110 spin_lock_init(&rdev->irq.sw_lock);
113 r = drm_vblank_init(rdev->ddev, num_crtc); 111 r = drm_vblank_init(rdev->ddev, rdev->num_crtc);
114 if (r) { 112 if (r) {
115 return r; 113 return r;
116 } 114 }
117 /* enable msi */ 115 /* enable msi */
118 rdev->msi_enabled = 0; 116 rdev->msi_enabled = 0;
119 if (rdev->family >= CHIP_RV380) { 117 /* MSIs don't seem to work reliably on all IGP
118 * chips. Disable MSI on them for now.
119 */
120 if ((rdev->family >= CHIP_RV380) &&
121 (!(rdev->flags & RADEON_IS_IGP))) {
120 int ret = pci_enable_msi(rdev->pdev); 122 int ret = pci_enable_msi(rdev->pdev);
121 if (!ret) { 123 if (!ret) {
122 rdev->msi_enabled = 1; 124 rdev->msi_enabled = 1;
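[Note on the MSI hunk above] The opportunistic pci_enable_msi() call is kept, but IGP parts are now excluded because message-signalled interrupts have proven unreliable there; when MSI is skipped or fails, the driver simply stays on legacy INTx. A hedged, generic sketch of that enable-MSI-or-fall-back pattern; example_setup_irq()/example_teardown_irq() and their arguments are illustrative, not the driver's actual code:

#include <linux/pci.h>
#include <linux/interrupt.h>

static int example_setup_irq(struct pci_dev *pdev, irq_handler_t handler,
			     void *ctx, bool msi_ok)
{
	bool msi_enabled = false;

	/* Try MSI only where the hardware is trusted to deliver it. */
	if (msi_ok && !pci_enable_msi(pdev))
		msi_enabled = true;	/* pdev->irq now refers to the MSI vector */

	/* Same request_irq() path either way; only the vector (and sharing) differs. */
	return request_irq(pdev->irq, handler, msi_enabled ? 0 : IRQF_SHARED,
			   "example", ctx);
}

static void example_teardown_irq(struct pci_dev *pdev, void *ctx, bool msi_enabled)
{
	free_irq(pdev->irq, ctx);
	if (msi_enabled)
		pci_disable_msi(pdev);
}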
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 20ec276e7596..c633319f98ed 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -31,6 +31,7 @@
31#include "radeon_drm.h" 31#include "radeon_drm.h"
32 32
33#include <linux/vga_switcheroo.h> 33#include <linux/vga_switcheroo.h>
34#include <linux/slab.h>
34 35
35int radeon_driver_unload_kms(struct drm_device *dev) 36int radeon_driver_unload_kms(struct drm_device *dev)
36{ 37{
@@ -164,7 +165,7 @@ u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc)
164{ 165{
165 struct radeon_device *rdev = dev->dev_private; 166 struct radeon_device *rdev = dev->dev_private;
166 167
167 if (crtc < 0 || crtc > 1) { 168 if (crtc < 0 || crtc >= rdev->num_crtc) {
168 DRM_ERROR("Invalid crtc %d\n", crtc); 169 DRM_ERROR("Invalid crtc %d\n", crtc);
169 return -EINVAL; 170 return -EINVAL;
170 } 171 }
@@ -176,7 +177,7 @@ int radeon_enable_vblank_kms(struct drm_device *dev, int crtc)
176{ 177{
177 struct radeon_device *rdev = dev->dev_private; 178 struct radeon_device *rdev = dev->dev_private;
178 179
179 if (crtc < 0 || crtc > 1) { 180 if (crtc < 0 || crtc >= rdev->num_crtc) {
180 DRM_ERROR("Invalid crtc %d\n", crtc); 181 DRM_ERROR("Invalid crtc %d\n", crtc);
181 return -EINVAL; 182 return -EINVAL;
182 } 183 }
@@ -190,7 +191,7 @@ void radeon_disable_vblank_kms(struct drm_device *dev, int crtc)
190{ 191{
191 struct radeon_device *rdev = dev->dev_private; 192 struct radeon_device *rdev = dev->dev_private;
192 193
193 if (crtc < 0 || crtc > 1) { 194 if (crtc < 0 || crtc >= rdev->num_crtc) {
194 DRM_ERROR("Invalid crtc %d\n", crtc); 195 DRM_ERROR("Invalid crtc %d\n", crtc);
195 return; 196 return;
196 } 197 }
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index df23d6a01d02..88865e38fe30 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -603,6 +603,10 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod
603 ? RADEON_CRTC2_INTERLACE_EN 603 ? RADEON_CRTC2_INTERLACE_EN
604 : 0)); 604 : 0));
605 605
606 /* rs4xx chips seem to like to have the crtc enabled when the timing is set */
607 if ((rdev->family == CHIP_RS400) || (rdev->family == CHIP_RS480))
608 crtc2_gen_cntl |= RADEON_CRTC2_EN;
609
606 disp2_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL); 610 disp2_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL);
607 disp2_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN; 611 disp2_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN;
608 612
@@ -630,6 +634,10 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod
630 ? RADEON_CRTC_INTERLACE_EN 634 ? RADEON_CRTC_INTERLACE_EN
631 : 0)); 635 : 0));
632 636
637 /* rs4xx chips seem to like to have the crtc enabled when the timing is set */
638 if ((rdev->family == CHIP_RS400) || (rdev->family == CHIP_RS480))
639 crtc_gen_cntl |= RADEON_CRTC_EN;
640
633 crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL); 641 crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
634 crtc_ext_cntl |= (RADEON_XCRT_CNT_EN | 642 crtc_ext_cntl |= (RADEON_XCRT_CNT_EN |
635 RADEON_CRTC_VSYNC_DIS | 643 RADEON_CRTC_VSYNC_DIS |
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index cf389ce50a8a..0274abe17ad9 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -228,16 +228,8 @@ static bool radeon_legacy_mode_fixup(struct drm_encoder *encoder,
228 drm_mode_set_crtcinfo(adjusted_mode, 0); 228 drm_mode_set_crtcinfo(adjusted_mode, 0);
229 229
230 /* get the native mode for LVDS */ 230 /* get the native mode for LVDS */
231 if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) { 231 if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT))
232 struct drm_display_mode *native_mode = &radeon_encoder->native_mode; 232 radeon_panel_mode_fixup(encoder, adjusted_mode);
233 int mode_id = adjusted_mode->base.id;
234 *adjusted_mode = *native_mode;
235 adjusted_mode->hdisplay = mode->hdisplay;
236 adjusted_mode->vdisplay = mode->vdisplay;
237 adjusted_mode->crtc_hdisplay = mode->hdisplay;
238 adjusted_mode->crtc_vdisplay = mode->vdisplay;
239 adjusted_mode->base.id = mode_id;
240 }
241 233
242 return true; 234 return true;
243} 235}
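[Note on the mode-fixup hunk above] The LVDS branch now calls the shared radeon_panel_mode_fixup() helper instead of open-coding the copy. The helper's body is not part of this diff; the sketch below only restates what the removed block did, which is presumably what the common helper centralizes: adopt the panel's fixed native timings, keep the user-requested active area for the RMX scaler, and preserve the DRM mode object id.

#include "drmP.h"

/* Sketch of the removed open-coded fixup; names mirror the deleted lines. */
static void panel_mode_fixup_sketch(struct drm_display_mode *adjusted_mode,
				    const struct drm_display_mode *mode,
				    const struct drm_display_mode *native_mode)
{
	int mode_id = adjusted_mode->base.id;

	*adjusted_mode = *native_mode;			/* full native panel timing */
	adjusted_mode->hdisplay = mode->hdisplay;	/* requested active area ... */
	adjusted_mode->vdisplay = mode->vdisplay;
	adjusted_mode->crtc_hdisplay = mode->hdisplay;	/* ... on the CRTC as well */
	adjusted_mode->crtc_vdisplay = mode->vdisplay;
	adjusted_mode->base.id = mode_id;		/* keep the DRM object id */
}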
@@ -830,8 +822,8 @@ static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode)
830 crtc2_gen_cntl &= ~RADEON_CRTC2_CRT2_ON; 822 crtc2_gen_cntl &= ~RADEON_CRTC2_CRT2_ON;
831 823
832 if (rdev->family == CHIP_R420 || 824 if (rdev->family == CHIP_R420 ||
833 rdev->family == CHIP_R423 || 825 rdev->family == CHIP_R423 ||
834 rdev->family == CHIP_RV410) 826 rdev->family == CHIP_RV410)
835 tv_dac_cntl |= (R420_TV_DAC_RDACPD | 827 tv_dac_cntl |= (R420_TV_DAC_RDACPD |
836 R420_TV_DAC_GDACPD | 828 R420_TV_DAC_GDACPD |
837 R420_TV_DAC_BDACPD | 829 R420_TV_DAC_BDACPD |
@@ -907,35 +899,43 @@ static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder,
907 if (rdev->family != CHIP_R200) { 899 if (rdev->family != CHIP_R200) {
908 tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL); 900 tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
909 if (rdev->family == CHIP_R420 || 901 if (rdev->family == CHIP_R420 ||
910 rdev->family == CHIP_R423 || 902 rdev->family == CHIP_R423 ||
911 rdev->family == CHIP_RV410) { 903 rdev->family == CHIP_RV410) {
912 tv_dac_cntl &= ~(RADEON_TV_DAC_STD_MASK | 904 tv_dac_cntl &= ~(RADEON_TV_DAC_STD_MASK |
913 RADEON_TV_DAC_BGADJ_MASK | 905 RADEON_TV_DAC_BGADJ_MASK |
914 R420_TV_DAC_DACADJ_MASK | 906 R420_TV_DAC_DACADJ_MASK |
915 R420_TV_DAC_RDACPD | 907 R420_TV_DAC_RDACPD |
916 R420_TV_DAC_GDACPD | 908 R420_TV_DAC_GDACPD |
917 R420_TV_DAC_BDACPD | 909 R420_TV_DAC_BDACPD |
918 R420_TV_DAC_TVENABLE); 910 R420_TV_DAC_TVENABLE);
919 } else { 911 } else {
920 tv_dac_cntl &= ~(RADEON_TV_DAC_STD_MASK | 912 tv_dac_cntl &= ~(RADEON_TV_DAC_STD_MASK |
921 RADEON_TV_DAC_BGADJ_MASK | 913 RADEON_TV_DAC_BGADJ_MASK |
922 RADEON_TV_DAC_DACADJ_MASK | 914 RADEON_TV_DAC_DACADJ_MASK |
923 RADEON_TV_DAC_RDACPD | 915 RADEON_TV_DAC_RDACPD |
924 RADEON_TV_DAC_GDACPD | 916 RADEON_TV_DAC_GDACPD |
925 RADEON_TV_DAC_BDACPD); 917 RADEON_TV_DAC_BDACPD);
926 } 918 }
927 919
928 /* FIXME TV */ 920 tv_dac_cntl |= RADEON_TV_DAC_NBLANK | RADEON_TV_DAC_NHOLD;
929 if (tv_dac) { 921
930 struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv; 922 if (is_tv) {
931 tv_dac_cntl |= (RADEON_TV_DAC_NBLANK | 923 if (tv_dac->tv_std == TV_STD_NTSC ||
932 RADEON_TV_DAC_NHOLD | 924 tv_dac->tv_std == TV_STD_NTSC_J ||
933 RADEON_TV_DAC_STD_PS2 | 925 tv_dac->tv_std == TV_STD_PAL_M ||
934 tv_dac->ps2_tvdac_adj); 926 tv_dac->tv_std == TV_STD_PAL_60)
927 tv_dac_cntl |= tv_dac->ntsc_tvdac_adj;
928 else
929 tv_dac_cntl |= tv_dac->pal_tvdac_adj;
930
931 if (tv_dac->tv_std == TV_STD_NTSC ||
932 tv_dac->tv_std == TV_STD_NTSC_J)
933 tv_dac_cntl |= RADEON_TV_DAC_STD_NTSC;
934 else
935 tv_dac_cntl |= RADEON_TV_DAC_STD_PAL;
935 } else 936 } else
936 tv_dac_cntl |= (RADEON_TV_DAC_NBLANK | 937 tv_dac_cntl |= (RADEON_TV_DAC_STD_PS2 |
937 RADEON_TV_DAC_NHOLD | 938 tv_dac->ps2_tvdac_adj);
938 RADEON_TV_DAC_STD_PS2);
939 939
940 WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl); 940 WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
941 } 941 }
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_tv.c b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
index 417684daef4c..f2ed27c8055b 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_tv.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
@@ -57,6 +57,10 @@
57#define NTSC_TV_PLL_N_14 693 57#define NTSC_TV_PLL_N_14 693
58#define NTSC_TV_PLL_P_14 7 58#define NTSC_TV_PLL_P_14 7
59 59
60#define PAL_TV_PLL_M_14 19
61#define PAL_TV_PLL_N_14 353
62#define PAL_TV_PLL_P_14 5
63
60#define VERT_LEAD_IN_LINES 2 64#define VERT_LEAD_IN_LINES 2
61#define FRAC_BITS 0xe 65#define FRAC_BITS 0xe
62#define FRAC_MASK 0x3fff 66#define FRAC_MASK 0x3fff
@@ -205,9 +209,24 @@ static const struct radeon_tv_mode_constants available_tv_modes[] = {
205 630627, /* defRestart */ 209 630627, /* defRestart */
206 347, /* crtcPLL_N */ 210 347, /* crtcPLL_N */
207 14, /* crtcPLL_M */ 211 14, /* crtcPLL_M */
208 8, /* crtcPLL_postDiv */ 212 8, /* crtcPLL_postDiv */
209 1022, /* pixToTV */ 213 1022, /* pixToTV */
210 }, 214 },
215 { /* PAL timing for 14 Mhz ref clk */
216 800, /* horResolution */
217 600, /* verResolution */
218 TV_STD_PAL, /* standard */
219 1131, /* horTotal */
220 742, /* verTotal */
221 813, /* horStart */
222 840, /* horSyncStart */
223 633, /* verSyncStart */
224 708369, /* defRestart */
225 211, /* crtcPLL_N */
226 9, /* crtcPLL_M */
227 8, /* crtcPLL_postDiv */
228 759, /* pixToTV */
229 },
211}; 230};
212 231
213#define N_AVAILABLE_MODES ARRAY_SIZE(available_tv_modes) 232#define N_AVAILABLE_MODES ARRAY_SIZE(available_tv_modes)
@@ -242,7 +261,7 @@ static const struct radeon_tv_mode_constants *radeon_legacy_tv_get_std_mode(stru
242 if (pll->reference_freq == 2700) 261 if (pll->reference_freq == 2700)
243 const_ptr = &available_tv_modes[1]; 262 const_ptr = &available_tv_modes[1];
244 else 263 else
245 const_ptr = &available_tv_modes[1]; /* FIX ME */ 264 const_ptr = &available_tv_modes[3];
246 } 265 }
247 return const_ptr; 266 return const_ptr;
248} 267}
@@ -685,9 +704,9 @@ void radeon_legacy_tv_mode_set(struct drm_encoder *encoder,
685 n = PAL_TV_PLL_N_27; 704 n = PAL_TV_PLL_N_27;
686 p = PAL_TV_PLL_P_27; 705 p = PAL_TV_PLL_P_27;
687 } else { 706 } else {
688 m = PAL_TV_PLL_M_27; 707 m = PAL_TV_PLL_M_14;
689 n = PAL_TV_PLL_N_27; 708 n = PAL_TV_PLL_N_14;
690 p = PAL_TV_PLL_P_27; 709 p = PAL_TV_PLL_P_14;
691 } 710 }
692 } 711 }
693 712
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 1702b820aa4d..5413fcd63086 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -129,6 +129,7 @@ struct radeon_tmds_pll {
129#define RADEON_PLL_USE_FRAC_FB_DIV (1 << 10) 129#define RADEON_PLL_USE_FRAC_FB_DIV (1 << 10)
130#define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11) 130#define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11)
131#define RADEON_PLL_USE_POST_DIV (1 << 12) 131#define RADEON_PLL_USE_POST_DIV (1 << 12)
132#define RADEON_PLL_IS_LCD (1 << 13)
132 133
133/* pll algo */ 134/* pll algo */
134enum radeon_pll_algo { 135enum radeon_pll_algo {
@@ -149,6 +150,8 @@ struct radeon_pll {
149 uint32_t pll_in_max; 150 uint32_t pll_in_max;
150 uint32_t pll_out_min; 151 uint32_t pll_out_min;
151 uint32_t pll_out_max; 152 uint32_t pll_out_max;
153 uint32_t lcd_pll_out_min;
154 uint32_t lcd_pll_out_max;
152 uint32_t best_vco; 155 uint32_t best_vco;
153 156
154 /* divider limits */ 157 /* divider limits */
@@ -170,17 +173,12 @@ struct radeon_pll {
170 enum radeon_pll_algo algo; 173 enum radeon_pll_algo algo;
171}; 174};
172 175
173struct i2c_algo_radeon_data {
174 struct i2c_adapter bit_adapter;
175 struct i2c_algo_bit_data bit_data;
176};
177
178struct radeon_i2c_chan { 176struct radeon_i2c_chan {
179 struct i2c_adapter adapter; 177 struct i2c_adapter adapter;
180 struct drm_device *dev; 178 struct drm_device *dev;
181 union { 179 union {
180 struct i2c_algo_bit_data bit;
182 struct i2c_algo_dp_aux_data dp; 181 struct i2c_algo_dp_aux_data dp;
183 struct i2c_algo_radeon_data radeon;
184 } algo; 182 } algo;
185 struct radeon_i2c_bus_rec rec; 183 struct radeon_i2c_bus_rec rec;
186}; 184};
@@ -342,6 +340,7 @@ struct radeon_encoder {
342 struct drm_display_mode native_mode; 340 struct drm_display_mode native_mode;
343 void *enc_priv; 341 void *enc_priv;
344 int hdmi_offset; 342 int hdmi_offset;
343 int hdmi_config_offset;
345 int hdmi_audio_workaround; 344 int hdmi_audio_workaround;
346 int hdmi_buffer_status; 345 int hdmi_buffer_status;
347}; 346};
@@ -431,7 +430,6 @@ extern struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
431 struct radeon_i2c_bus_rec *rec, 430 struct radeon_i2c_bus_rec *rec,
432 const char *name); 431 const char *name);
433extern void radeon_i2c_destroy(struct radeon_i2c_chan *i2c); 432extern void radeon_i2c_destroy(struct radeon_i2c_chan *i2c);
434extern void radeon_i2c_destroy_dp(struct radeon_i2c_chan *i2c);
435extern void radeon_i2c_get_byte(struct radeon_i2c_chan *i2c_bus, 433extern void radeon_i2c_get_byte(struct radeon_i2c_chan *i2c_bus,
436 u8 slave_addr, 434 u8 slave_addr,
437 u8 addr, 435 u8 addr,
@@ -560,6 +558,8 @@ extern int radeon_static_clocks_init(struct drm_device *dev);
560bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, 558bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
561 struct drm_display_mode *mode, 559 struct drm_display_mode *mode,
562 struct drm_display_mode *adjusted_mode); 560 struct drm_display_mode *adjusted_mode);
561void radeon_panel_mode_fixup(struct drm_encoder *encoder,
562 struct drm_display_mode *adjusted_mode);
563void atom_rv515_force_tv_scaler(struct radeon_device *rdev, struct radeon_crtc *radeon_crtc); 563void atom_rv515_force_tv_scaler(struct radeon_device *rdev, struct radeon_crtc *radeon_crtc);
564 564
565/* legacy tv */ 565/* legacy tv */
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index fc9d00ac6b15..122774742bd5 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -30,6 +30,7 @@
30 * Dave Airlie 30 * Dave Airlie
31 */ 31 */
32#include <linux/list.h> 32#include <linux/list.h>
33#include <linux/slab.h>
33#include <drm/drmP.h> 34#include <drm/drmP.h>
34#include "radeon_drm.h" 35#include "radeon_drm.h"
35#include "radeon.h" 36#include "radeon.h"
@@ -185,8 +186,10 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
185 return 0; 186 return 0;
186 } 187 }
187 radeon_ttm_placement_from_domain(bo, domain); 188 radeon_ttm_placement_from_domain(bo, domain);
188 /* force to pin into visible video ram */ 189 if (domain == RADEON_GEM_DOMAIN_VRAM) {
189 bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT; 190 /* force to pin into visible video ram */
191 bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
192 }
190 for (i = 0; i < bo->placement.num_placement; i++) 193 for (i = 0; i < bo->placement.num_placement; i++)
191 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; 194 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
192 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); 195 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
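[Note on the pin-path hunk above] The clamp to the CPU-visible aperture is now applied only when the buffer is actually being pinned into VRAM, so GTT pins are no longer artificially restricted. lpfn is a page-frame limit, so as a worked example under the assumption of 4 KiB pages, a 256 MiB visible aperture gives lpfn = (256 << 20) >> 12 = 65536, i.e. pinned VRAM buffers must land in the first 65536 VRAM pages; the real figure comes from rdev->mc.visible_vram_size, the 256 MiB value here is only illustrative.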
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index d4d1c39a0e99..a4b57493aa78 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -28,6 +28,7 @@
28#define RADEON_RECLOCK_DELAY_MS 200 28#define RADEON_RECLOCK_DELAY_MS 200
29#define RADEON_WAIT_VBLANK_TIMEOUT 200 29#define RADEON_WAIT_VBLANK_TIMEOUT 200
30 30
31static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
31static void radeon_pm_set_clocks_locked(struct radeon_device *rdev); 32static void radeon_pm_set_clocks_locked(struct radeon_device *rdev);
32static void radeon_pm_set_clocks(struct radeon_device *rdev); 33static void radeon_pm_set_clocks(struct radeon_device *rdev);
33static void radeon_pm_idle_work_handler(struct work_struct *work); 34static void radeon_pm_idle_work_handler(struct work_struct *work);
@@ -179,6 +180,16 @@ static void radeon_get_power_state(struct radeon_device *rdev,
179 rdev->pm.requested_power_state->non_clock_info.pcie_lanes); 180 rdev->pm.requested_power_state->non_clock_info.pcie_lanes);
180} 181}
181 182
183static inline void radeon_sync_with_vblank(struct radeon_device *rdev)
184{
185 if (rdev->pm.active_crtcs) {
186 rdev->pm.vblank_sync = false;
187 wait_event_timeout(
188 rdev->irq.vblank_queue, rdev->pm.vblank_sync,
189 msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
190 }
191}
192
182static void radeon_set_power_state(struct radeon_device *rdev) 193static void radeon_set_power_state(struct radeon_device *rdev)
183{ 194{
184 /* if *_clock_mode are the same, *_power_state are as well */ 195 /* if *_clock_mode are the same, *_power_state are as well */
@@ -189,11 +200,28 @@ static void radeon_set_power_state(struct radeon_device *rdev)
189 rdev->pm.requested_clock_mode->sclk, 200 rdev->pm.requested_clock_mode->sclk,
190 rdev->pm.requested_clock_mode->mclk, 201 rdev->pm.requested_clock_mode->mclk,
191 rdev->pm.requested_power_state->non_clock_info.pcie_lanes); 202 rdev->pm.requested_power_state->non_clock_info.pcie_lanes);
203
192 /* set pcie lanes */ 204 /* set pcie lanes */
205 /* TODO */
206
193 /* set voltage */ 207 /* set voltage */
208 /* TODO */
209
194 /* set engine clock */ 210 /* set engine clock */
211 radeon_sync_with_vblank(rdev);
212 radeon_pm_debug_check_in_vbl(rdev, false);
195 radeon_set_engine_clock(rdev, rdev->pm.requested_clock_mode->sclk); 213 radeon_set_engine_clock(rdev, rdev->pm.requested_clock_mode->sclk);
214 radeon_pm_debug_check_in_vbl(rdev, true);
215
216#if 0
196 /* set memory clock */ 217 /* set memory clock */
218 if (rdev->asic->set_memory_clock) {
219 radeon_sync_with_vblank(rdev);
220 radeon_pm_debug_check_in_vbl(rdev, false);
221 radeon_set_memory_clock(rdev, rdev->pm.requested_clock_mode->mclk);
222 radeon_pm_debug_check_in_vbl(rdev, true);
223 }
224#endif
197 225
198 rdev->pm.current_power_state = rdev->pm.requested_power_state; 226 rdev->pm.current_power_state = rdev->pm.requested_power_state;
199 rdev->pm.current_clock_mode = rdev->pm.requested_clock_mode; 227 rdev->pm.current_clock_mode = rdev->pm.requested_clock_mode;
@@ -229,6 +257,12 @@ int radeon_pm_init(struct radeon_device *rdev)
229 return 0; 257 return 0;
230} 258}
231 259
260void radeon_pm_fini(struct radeon_device *rdev)
261{
262 if (rdev->pm.i2c_bus)
263 radeon_i2c_destroy(rdev->pm.i2c_bus);
264}
265
232void radeon_pm_compute_clocks(struct radeon_device *rdev) 266void radeon_pm_compute_clocks(struct radeon_device *rdev)
233{ 267{
234 struct drm_device *ddev = rdev->ddev; 268 struct drm_device *ddev = rdev->ddev;
@@ -245,7 +279,8 @@ void radeon_pm_compute_clocks(struct radeon_device *rdev)
245 list_for_each_entry(connector, 279 list_for_each_entry(connector,
246 &ddev->mode_config.connector_list, head) { 280 &ddev->mode_config.connector_list, head) {
247 if (connector->encoder && 281 if (connector->encoder &&
248 connector->dpms != DRM_MODE_DPMS_OFF) { 282 connector->encoder->crtc &&
283 connector->dpms != DRM_MODE_DPMS_OFF) {
249 radeon_crtc = to_radeon_crtc(connector->encoder->crtc); 284 radeon_crtc = to_radeon_crtc(connector->encoder->crtc);
250 rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id); 285 rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
251 ++count; 286 ++count;
@@ -333,10 +368,7 @@ static void radeon_pm_set_clocks_locked(struct radeon_device *rdev)
333 break; 368 break;
334 } 369 }
335 370
336 /* check if we are in vblank */
337 radeon_pm_debug_check_in_vbl(rdev, false);
338 radeon_set_power_state(rdev); 371 radeon_set_power_state(rdev);
339 radeon_pm_debug_check_in_vbl(rdev, true);
340 rdev->pm.planned_action = PM_ACTION_NONE; 372 rdev->pm.planned_action = PM_ACTION_NONE;
341} 373}
342 374
@@ -353,10 +385,7 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
353 rdev->pm.req_vblank |= (1 << 1); 385 rdev->pm.req_vblank |= (1 << 1);
354 drm_vblank_get(rdev->ddev, 1); 386 drm_vblank_get(rdev->ddev, 1);
355 } 387 }
356 if (rdev->pm.active_crtcs) 388 radeon_pm_set_clocks_locked(rdev);
357 wait_event_interruptible_timeout(
358 rdev->irq.vblank_queue, 0,
359 msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
360 if (rdev->pm.req_vblank & (1 << 0)) { 389 if (rdev->pm.req_vblank & (1 << 0)) {
361 rdev->pm.req_vblank &= ~(1 << 0); 390 rdev->pm.req_vblank &= ~(1 << 0);
362 drm_vblank_put(rdev->ddev, 0); 391 drm_vblank_put(rdev->ddev, 0);
@@ -366,7 +395,6 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
366 drm_vblank_put(rdev->ddev, 1); 395 drm_vblank_put(rdev->ddev, 1);
367 } 396 }
368 397
369 radeon_pm_set_clocks_locked(rdev);
370 mutex_unlock(&rdev->cp.mutex); 398 mutex_unlock(&rdev->cp.mutex);
371} 399}
372 400
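[Note on the radeon_pm.c hunks above] Two fixes are folded together: the old code waited with wait_event_interruptible_timeout(..., 0, ...), i.e. on a condition that is always false, so it merely slept for the full timeout; the new radeon_sync_with_vblank() clears a flag, lets the vblank interrupt set it (see the rs600_irq_process hunk further down, which now sets rdev->pm.vblank_sync), and bounds the wait so a stalled pipe cannot wedge reclocking. A minimal stand-alone sketch of that flag-plus-waitqueue pattern; the names are illustrative:

#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(vblank_queue);
static bool vblank_seen;

/* Called from the display interrupt handler on each vertical blank. */
static void on_vblank_irq(void)
{
	vblank_seen = true;
	wake_up(&vblank_queue);
}

/* Called before touching clocks: wait for the next blanking interval,
 * but give up after 200 ms so a stuck pipe cannot stall power management. */
static void sync_with_vblank(void)
{
	vblank_seen = false;
	wait_event_timeout(vblank_queue, vblank_seen,
			   msecs_to_jiffies(200));
}

The memory-clock switch is kept under #if 0 for now, so only the engine clock is reprogrammed inside the blanking window until mclk switching is known to be safe.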
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
index 5c0dc082d330..eabbc9cf30a7 100644
--- a/drivers/gpu/drm/radeon/radeon_reg.h
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
@@ -346,6 +346,7 @@
346# define RADEON_TVPLL_PWRMGT_OFF (1 << 30) 346# define RADEON_TVPLL_PWRMGT_OFF (1 << 30)
347# define RADEON_TVCLK_TURNOFF (1 << 31) 347# define RADEON_TVCLK_TURNOFF (1 << 31)
348#define RADEON_PLL_PWRMGT_CNTL 0x0015 /* PLL */ 348#define RADEON_PLL_PWRMGT_CNTL 0x0015 /* PLL */
349# define RADEON_PM_MODE_SEL (1 << 13)
349# define RADEON_TCL_BYPASS_DISABLE (1 << 20) 350# define RADEON_TCL_BYPASS_DISABLE (1 << 20)
350#define RADEON_CLR_CMP_CLR_3D 0x1a24 351#define RADEON_CLR_CMP_CLR_3D 0x1a24
351#define RADEON_CLR_CMP_CLR_DST 0x15c8 352#define RADEON_CLR_CMP_CLR_DST 0x15c8
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index e50513a62735..f6e1e8d4d986 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -26,6 +26,7 @@
26 * Jerome Glisse 26 * Jerome Glisse
27 */ 27 */
28#include <linux/seq_file.h> 28#include <linux/seq_file.h>
29#include <linux/slab.h>
29#include "drmP.h" 30#include "drmP.h"
30#include "radeon_drm.h" 31#include "radeon_drm.h"
31#include "radeon_reg.h" 32#include "radeon_reg.h"
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 43c5ab34b634..d031b6863082 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -36,6 +36,7 @@
36#include <drm/drmP.h> 36#include <drm/drmP.h>
37#include <drm/radeon_drm.h> 37#include <drm/radeon_drm.h>
38#include <linux/seq_file.h> 38#include <linux/seq_file.h>
39#include <linux/slab.h>
39#include "radeon_reg.h" 40#include "radeon_reg.h"
40#include "radeon.h" 41#include "radeon.h"
41 42
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r300 b/drivers/gpu/drm/radeon/reg_srcs/r300
index 19c4663fa9c6..1e97b2d129fd 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r300
+++ b/drivers/gpu/drm/radeon/reg_srcs/r300
@@ -125,6 +125,8 @@ r300 0x4f60
1250x4000 GB_VAP_RASTER_VTX_FMT_0 1250x4000 GB_VAP_RASTER_VTX_FMT_0
1260x4004 GB_VAP_RASTER_VTX_FMT_1 1260x4004 GB_VAP_RASTER_VTX_FMT_1
1270x4008 GB_ENABLE 1270x4008 GB_ENABLE
1280x4010 GB_MSPOS0
1290x4014 GB_MSPOS1
1280x401C GB_SELECT 1300x401C GB_SELECT
1290x4020 GB_AA_CONFIG 1310x4020 GB_AA_CONFIG
1300x4024 GB_FIFO_SIZE 1320x4024 GB_FIFO_SIZE
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r420 b/drivers/gpu/drm/radeon/reg_srcs/r420
index 989f7a020832..e958980d00f1 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r420
+++ b/drivers/gpu/drm/radeon/reg_srcs/r420
@@ -125,6 +125,8 @@ r420 0x4f60
1250x4000 GB_VAP_RASTER_VTX_FMT_0 1250x4000 GB_VAP_RASTER_VTX_FMT_0
1260x4004 GB_VAP_RASTER_VTX_FMT_1 1260x4004 GB_VAP_RASTER_VTX_FMT_1
1270x4008 GB_ENABLE 1270x4008 GB_ENABLE
1280x4010 GB_MSPOS0
1290x4014 GB_MSPOS1
1280x401C GB_SELECT 1300x401C GB_SELECT
1290x4020 GB_AA_CONFIG 1310x4020 GB_AA_CONFIG
1300x4024 GB_FIFO_SIZE 1320x4024 GB_FIFO_SIZE
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r600 b/drivers/gpu/drm/radeon/reg_srcs/r600
index 8f414a5f520f..af0da4ae3f55 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r600
+++ b/drivers/gpu/drm/radeon/reg_srcs/r600
@@ -26,20 +26,16 @@ r600 0x9400
260x00028408 VGT_INDX_OFFSET 260x00028408 VGT_INDX_OFFSET
270x00028AA0 VGT_INSTANCE_STEP_RATE_0 270x00028AA0 VGT_INSTANCE_STEP_RATE_0
280x00028AA4 VGT_INSTANCE_STEP_RATE_1 280x00028AA4 VGT_INSTANCE_STEP_RATE_1
290x000088C0 VGT_LAST_COPY_STATE
300x00028400 VGT_MAX_VTX_INDX 290x00028400 VGT_MAX_VTX_INDX
310x000088D8 VGT_MC_LAT_CNTL
320x00028404 VGT_MIN_VTX_INDX 300x00028404 VGT_MIN_VTX_INDX
330x00028A94 VGT_MULTI_PRIM_IB_RESET_EN 310x00028A94 VGT_MULTI_PRIM_IB_RESET_EN
340x0002840C VGT_MULTI_PRIM_IB_RESET_INDX 320x0002840C VGT_MULTI_PRIM_IB_RESET_INDX
350x00008970 VGT_NUM_INDICES 330x00008970 VGT_NUM_INDICES
360x00008974 VGT_NUM_INSTANCES 340x00008974 VGT_NUM_INSTANCES
370x00028A10 VGT_OUTPUT_PATH_CNTL 350x00028A10 VGT_OUTPUT_PATH_CNTL
380x00028C5C VGT_OUT_DEALLOC_CNTL
390x00028A84 VGT_PRIMITIVEID_EN 360x00028A84 VGT_PRIMITIVEID_EN
400x00008958 VGT_PRIMITIVE_TYPE 370x00008958 VGT_PRIMITIVE_TYPE
410x00028AB4 VGT_REUSE_OFF 380x00028AB4 VGT_REUSE_OFF
420x00028C58 VGT_VERTEX_REUSE_BLOCK_CNTL
430x00028AB8 VGT_VTX_CNT_EN 390x00028AB8 VGT_VTX_CNT_EN
440x000088B0 VGT_VTX_VECT_EJECT_REG 400x000088B0 VGT_VTX_VECT_EJECT_REG
450x00028810 PA_CL_CLIP_CNTL 410x00028810 PA_CL_CLIP_CNTL
@@ -280,7 +276,6 @@ r600 0x9400
2800x00028E00 PA_SU_POLY_OFFSET_FRONT_SCALE 2760x00028E00 PA_SU_POLY_OFFSET_FRONT_SCALE
2810x00028814 PA_SU_SC_MODE_CNTL 2770x00028814 PA_SU_SC_MODE_CNTL
2820x00028C08 PA_SU_VTX_CNTL 2780x00028C08 PA_SU_VTX_CNTL
2830x00008C00 SQ_CONFIG
2840x00008C04 SQ_GPR_RESOURCE_MGMT_1 2790x00008C04 SQ_GPR_RESOURCE_MGMT_1
2850x00008C08 SQ_GPR_RESOURCE_MGMT_2 2800x00008C08 SQ_GPR_RESOURCE_MGMT_2
2860x00008C10 SQ_STACK_RESOURCE_MGMT_1 2810x00008C10 SQ_STACK_RESOURCE_MGMT_1
@@ -320,18 +315,6 @@ r600 0x9400
3200x000283FC SQ_VTX_SEMANTIC_31 3150x000283FC SQ_VTX_SEMANTIC_31
3210x000288E0 SQ_VTX_SEMANTIC_CLEAR 3160x000288E0 SQ_VTX_SEMANTIC_CLEAR
3220x0003CFF4 SQ_VTX_START_INST_LOC 3170x0003CFF4 SQ_VTX_START_INST_LOC
3230x0003C000 SQ_TEX_SAMPLER_WORD0_0
3240x0003C004 SQ_TEX_SAMPLER_WORD1_0
3250x0003C008 SQ_TEX_SAMPLER_WORD2_0
3260x00030000 SQ_ALU_CONSTANT0_0
3270x00030004 SQ_ALU_CONSTANT1_0
3280x00030008 SQ_ALU_CONSTANT2_0
3290x0003000C SQ_ALU_CONSTANT3_0
3300x0003E380 SQ_BOOL_CONST_0
3310x0003E384 SQ_BOOL_CONST_1
3320x0003E388 SQ_BOOL_CONST_2
3330x0003E200 SQ_LOOP_CONST_0
3340x0003E200 SQ_LOOP_CONST_DX10_0
3350x000281C0 SQ_ALU_CONST_BUFFER_SIZE_GS_0 3180x000281C0 SQ_ALU_CONST_BUFFER_SIZE_GS_0
3360x000281C4 SQ_ALU_CONST_BUFFER_SIZE_GS_1 3190x000281C4 SQ_ALU_CONST_BUFFER_SIZE_GS_1
3370x000281C8 SQ_ALU_CONST_BUFFER_SIZE_GS_2 3200x000281C8 SQ_ALU_CONST_BUFFER_SIZE_GS_2
@@ -380,54 +363,6 @@ r600 0x9400
3800x000281B4 SQ_ALU_CONST_BUFFER_SIZE_VS_13 3630x000281B4 SQ_ALU_CONST_BUFFER_SIZE_VS_13
3810x000281B8 SQ_ALU_CONST_BUFFER_SIZE_VS_14 3640x000281B8 SQ_ALU_CONST_BUFFER_SIZE_VS_14
3820x000281BC SQ_ALU_CONST_BUFFER_SIZE_VS_15 3650x000281BC SQ_ALU_CONST_BUFFER_SIZE_VS_15
3830x000289C0 SQ_ALU_CONST_CACHE_GS_0
3840x000289C4 SQ_ALU_CONST_CACHE_GS_1
3850x000289C8 SQ_ALU_CONST_CACHE_GS_2
3860x000289CC SQ_ALU_CONST_CACHE_GS_3
3870x000289D0 SQ_ALU_CONST_CACHE_GS_4
3880x000289D4 SQ_ALU_CONST_CACHE_GS_5
3890x000289D8 SQ_ALU_CONST_CACHE_GS_6
3900x000289DC SQ_ALU_CONST_CACHE_GS_7
3910x000289E0 SQ_ALU_CONST_CACHE_GS_8
3920x000289E4 SQ_ALU_CONST_CACHE_GS_9
3930x000289E8 SQ_ALU_CONST_CACHE_GS_10
3940x000289EC SQ_ALU_CONST_CACHE_GS_11
3950x000289F0 SQ_ALU_CONST_CACHE_GS_12
3960x000289F4 SQ_ALU_CONST_CACHE_GS_13
3970x000289F8 SQ_ALU_CONST_CACHE_GS_14
3980x000289FC SQ_ALU_CONST_CACHE_GS_15
3990x00028940 SQ_ALU_CONST_CACHE_PS_0
4000x00028944 SQ_ALU_CONST_CACHE_PS_1
4010x00028948 SQ_ALU_CONST_CACHE_PS_2
4020x0002894C SQ_ALU_CONST_CACHE_PS_3
4030x00028950 SQ_ALU_CONST_CACHE_PS_4
4040x00028954 SQ_ALU_CONST_CACHE_PS_5
4050x00028958 SQ_ALU_CONST_CACHE_PS_6
4060x0002895C SQ_ALU_CONST_CACHE_PS_7
4070x00028960 SQ_ALU_CONST_CACHE_PS_8
4080x00028964 SQ_ALU_CONST_CACHE_PS_9
4090x00028968 SQ_ALU_CONST_CACHE_PS_10
4100x0002896C SQ_ALU_CONST_CACHE_PS_11
4110x00028970 SQ_ALU_CONST_CACHE_PS_12
4120x00028974 SQ_ALU_CONST_CACHE_PS_13
4130x00028978 SQ_ALU_CONST_CACHE_PS_14
4140x0002897C SQ_ALU_CONST_CACHE_PS_15
4150x00028980 SQ_ALU_CONST_CACHE_VS_0
4160x00028984 SQ_ALU_CONST_CACHE_VS_1
4170x00028988 SQ_ALU_CONST_CACHE_VS_2
4180x0002898C SQ_ALU_CONST_CACHE_VS_3
4190x00028990 SQ_ALU_CONST_CACHE_VS_4
4200x00028994 SQ_ALU_CONST_CACHE_VS_5
4210x00028998 SQ_ALU_CONST_CACHE_VS_6
4220x0002899C SQ_ALU_CONST_CACHE_VS_7
4230x000289A0 SQ_ALU_CONST_CACHE_VS_8
4240x000289A4 SQ_ALU_CONST_CACHE_VS_9
4250x000289A8 SQ_ALU_CONST_CACHE_VS_10
4260x000289AC SQ_ALU_CONST_CACHE_VS_11
4270x000289B0 SQ_ALU_CONST_CACHE_VS_12
4280x000289B4 SQ_ALU_CONST_CACHE_VS_13
4290x000289B8 SQ_ALU_CONST_CACHE_VS_14
4300x000289BC SQ_ALU_CONST_CACHE_VS_15
4310x000288D8 SQ_PGM_CF_OFFSET_ES 3660x000288D8 SQ_PGM_CF_OFFSET_ES
4320x000288DC SQ_PGM_CF_OFFSET_FS 3670x000288DC SQ_PGM_CF_OFFSET_FS
4330x000288D4 SQ_PGM_CF_OFFSET_GS 3680x000288D4 SQ_PGM_CF_OFFSET_GS
@@ -494,12 +429,7 @@ r600 0x9400
4940x00028438 SX_ALPHA_REF 4290x00028438 SX_ALPHA_REF
4950x00028410 SX_ALPHA_TEST_CONTROL 4300x00028410 SX_ALPHA_TEST_CONTROL
4960x00028350 SX_MISC 4310x00028350 SX_MISC
4970x0000A020 SMX_DC_CTL0
4980x0000A024 SMX_DC_CTL1
4990x0000A028 SMX_DC_CTL2
5000x00009608 TC_CNTL
5010x00009604 TC_INVALIDATE 4320x00009604 TC_INVALIDATE
5020x00009490 TD_CNTL
5030x00009400 TD_FILTER4 4330x00009400 TD_FILTER4
5040x00009404 TD_FILTER4_1 4340x00009404 TD_FILTER4_1
5050x00009408 TD_FILTER4_2 4350x00009408 TD_FILTER4_2
@@ -824,14 +754,9 @@ r600 0x9400
8240x00028428 CB_FOG_GREEN 7540x00028428 CB_FOG_GREEN
8250x00028424 CB_FOG_RED 7550x00028424 CB_FOG_RED
8260x00008040 WAIT_UNTIL 7560x00008040 WAIT_UNTIL
8270x00008950 CC_GC_SHADER_PIPE_CONFIG
8280x00008954 GC_USER_SHADER_PIPE_CONFIG
8290x00009714 VC_ENHANCE 7570x00009714 VC_ENHANCE
8300x00009830 DB_DEBUG 7580x00009830 DB_DEBUG
8310x00009838 DB_WATERMARKS 7590x00009838 DB_WATERMARKS
8320x00028D28 DB_SRESULTS_COMPARE_STATE0 7600x00028D28 DB_SRESULTS_COMPARE_STATE0
8330x00028D44 DB_ALPHA_TO_MASK 7610x00028D44 DB_ALPHA_TO_MASK
8340x00009504 TA_CNTL
8350x00009700 VC_CNTL 7620x00009700 VC_CNTL
8360x00009718 VC_CONFIG
8370x0000A02C SMX_DC_MC_INTF_CTL
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rs600 b/drivers/gpu/drm/radeon/reg_srcs/rs600
index 6801b865d1c4..83e8bc0c2bb2 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/rs600
+++ b/drivers/gpu/drm/radeon/reg_srcs/rs600
@@ -125,6 +125,8 @@ rs600 0x6d40
1250x4000 GB_VAP_RASTER_VTX_FMT_0 1250x4000 GB_VAP_RASTER_VTX_FMT_0
1260x4004 GB_VAP_RASTER_VTX_FMT_1 1260x4004 GB_VAP_RASTER_VTX_FMT_1
1270x4008 GB_ENABLE 1270x4008 GB_ENABLE
1280x4010 GB_MSPOS0
1290x4014 GB_MSPOS1
1280x401C GB_SELECT 1300x401C GB_SELECT
1290x4020 GB_AA_CONFIG 1310x4020 GB_AA_CONFIG
1300x4024 GB_FIFO_SIZE 1320x4024 GB_FIFO_SIZE
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rv515 b/drivers/gpu/drm/radeon/reg_srcs/rv515
index 38abf63bf2cd..1e46233985eb 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/rv515
+++ b/drivers/gpu/drm/radeon/reg_srcs/rv515
@@ -35,6 +35,7 @@ rv515 0x6d40
350x1DA8 VAP_VPORT_ZSCALE 350x1DA8 VAP_VPORT_ZSCALE
360x1DAC VAP_VPORT_ZOFFSET 360x1DAC VAP_VPORT_ZOFFSET
370x2080 VAP_CNTL 370x2080 VAP_CNTL
380x208C VAP_INDEX_OFFSET
380x2090 VAP_OUT_VTX_FMT_0 390x2090 VAP_OUT_VTX_FMT_0
390x2094 VAP_OUT_VTX_FMT_1 400x2094 VAP_OUT_VTX_FMT_1
400x20B0 VAP_VTE_CNTL 410x20B0 VAP_VTE_CNTL
@@ -158,6 +159,8 @@ rv515 0x6d40
1580x4000 GB_VAP_RASTER_VTX_FMT_0 1590x4000 GB_VAP_RASTER_VTX_FMT_0
1590x4004 GB_VAP_RASTER_VTX_FMT_1 1600x4004 GB_VAP_RASTER_VTX_FMT_1
1600x4008 GB_ENABLE 1610x4008 GB_ENABLE
1620x4010 GB_MSPOS0
1630x4014 GB_MSPOS1
1610x401C GB_SELECT 1640x401C GB_SELECT
1620x4020 GB_AA_CONFIG 1650x4020 GB_AA_CONFIG
1630x4024 GB_FIFO_SIZE 1660x4024 GB_FIFO_SIZE
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 626d51891ee9..1a41cb268b72 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -26,8 +26,10 @@
26 * Jerome Glisse 26 * Jerome Glisse
27 */ 27 */
28#include <linux/seq_file.h> 28#include <linux/seq_file.h>
29#include <linux/slab.h>
29#include <drm/drmP.h> 30#include <drm/drmP.h>
30#include "radeon.h" 31#include "radeon.h"
32#include "radeon_asic.h"
31#include "rs400d.h" 33#include "rs400d.h"
32 34
33/* This files gather functions specifics to : rs400,rs480 */ 35/* This files gather functions specifics to : rs400,rs480 */
@@ -202,9 +204,9 @@ void rs400_gart_disable(struct radeon_device *rdev)
202 204
203void rs400_gart_fini(struct radeon_device *rdev) 205void rs400_gart_fini(struct radeon_device *rdev)
204{ 206{
207 radeon_gart_fini(rdev);
205 rs400_gart_disable(rdev); 208 rs400_gart_disable(rdev);
206 radeon_gart_table_ram_free(rdev); 209 radeon_gart_table_ram_free(rdev);
207 radeon_gart_fini(rdev);
208} 210}
209 211
210int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) 212int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
@@ -264,6 +266,7 @@ void rs400_mc_init(struct radeon_device *rdev)
264 base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16; 266 base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
265 radeon_vram_location(rdev, &rdev->mc, base); 267 radeon_vram_location(rdev, &rdev->mc, base);
266 radeon_gtt_location(rdev, &rdev->mc); 268 radeon_gtt_location(rdev, &rdev->mc);
269 radeon_update_bandwidth_info(rdev);
267} 270}
268 271
269uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg) 272uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg)
@@ -388,6 +391,8 @@ static int rs400_startup(struct radeon_device *rdev)
388{ 391{
389 int r; 392 int r;
390 393
394 r100_set_common_regs(rdev);
395
391 rs400_mc_program(rdev); 396 rs400_mc_program(rdev);
392 /* Resume clock */ 397 /* Resume clock */
393 r300_clock_startup(rdev); 398 r300_clock_startup(rdev);
@@ -453,6 +458,7 @@ int rs400_suspend(struct radeon_device *rdev)
453 458
454void rs400_fini(struct radeon_device *rdev) 459void rs400_fini(struct radeon_device *rdev)
455{ 460{
461 radeon_pm_fini(rdev);
456 r100_cp_fini(rdev); 462 r100_cp_fini(rdev);
457 r100_wb_fini(rdev); 463 r100_wb_fini(rdev);
458 r100_ib_fini(rdev); 464 r100_ib_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 47f046b78c6b..a81bc7a21e14 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -37,6 +37,7 @@
37 */ 37 */
38#include "drmP.h" 38#include "drmP.h"
39#include "radeon.h" 39#include "radeon.h"
40#include "radeon_asic.h"
40#include "atom.h" 41#include "atom.h"
41#include "rs600d.h" 42#include "rs600d.h"
42 43
@@ -158,7 +159,7 @@ void rs600_gart_tlb_flush(struct radeon_device *rdev)
158 WREG32_MC(R_000100_MC_PT0_CNTL, tmp); 159 WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
159 160
160 tmp = RREG32_MC(R_000100_MC_PT0_CNTL); 161 tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
161 tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) & S_000100_INVALIDATE_L2_CACHE(1); 162 tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) | S_000100_INVALIDATE_L2_CACHE(1);
162 WREG32_MC(R_000100_MC_PT0_CNTL, tmp); 163 WREG32_MC(R_000100_MC_PT0_CNTL, tmp);
163 164
164 tmp = RREG32_MC(R_000100_MC_PT0_CNTL); 165 tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
@@ -267,9 +268,9 @@ void rs600_gart_disable(struct radeon_device *rdev)
267 268
268void rs600_gart_fini(struct radeon_device *rdev) 269void rs600_gart_fini(struct radeon_device *rdev)
269{ 270{
271 radeon_gart_fini(rdev);
270 rs600_gart_disable(rdev); 272 rs600_gart_disable(rdev);
271 radeon_gart_table_vram_free(rdev); 273 radeon_gart_table_vram_free(rdev);
272 radeon_gart_fini(rdev);
273} 274}
274 275
275#define R600_PTE_VALID (1 << 0) 276#define R600_PTE_VALID (1 << 0)
@@ -392,10 +393,12 @@ int rs600_irq_process(struct radeon_device *rdev)
392 /* Vertical blank interrupts */ 393 /* Vertical blank interrupts */
393 if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int)) { 394 if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int)) {
394 drm_handle_vblank(rdev->ddev, 0); 395 drm_handle_vblank(rdev->ddev, 0);
396 rdev->pm.vblank_sync = true;
395 wake_up(&rdev->irq.vblank_queue); 397 wake_up(&rdev->irq.vblank_queue);
396 } 398 }
397 if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int)) { 399 if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int)) {
398 drm_handle_vblank(rdev->ddev, 1); 400 drm_handle_vblank(rdev->ddev, 1);
401 rdev->pm.vblank_sync = true;
399 wake_up(&rdev->irq.vblank_queue); 402 wake_up(&rdev->irq.vblank_queue);
400 } 403 }
401 if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(r500_disp_int)) { 404 if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(r500_disp_int)) {
@@ -472,13 +475,38 @@ void rs600_mc_init(struct radeon_device *rdev)
472 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); 475 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
473 base = RREG32_MC(R_000004_MC_FB_LOCATION); 476 base = RREG32_MC(R_000004_MC_FB_LOCATION);
474 base = G_000004_MC_FB_START(base) << 16; 477 base = G_000004_MC_FB_START(base) << 16;
478 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
475 radeon_vram_location(rdev, &rdev->mc, base); 479 radeon_vram_location(rdev, &rdev->mc, base);
476 radeon_gtt_location(rdev, &rdev->mc); 480 radeon_gtt_location(rdev, &rdev->mc);
481 radeon_update_bandwidth_info(rdev);
477} 482}
478 483
479void rs600_bandwidth_update(struct radeon_device *rdev) 484void rs600_bandwidth_update(struct radeon_device *rdev)
480{ 485{
481 /* FIXME: implement, should this be like rs690 ? */ 486 struct drm_display_mode *mode0 = NULL;
487 struct drm_display_mode *mode1 = NULL;
488 u32 d1mode_priority_a_cnt, d2mode_priority_a_cnt;
489 /* FIXME: implement full support */
490
491 radeon_update_display_priority(rdev);
492
493 if (rdev->mode_info.crtcs[0]->base.enabled)
494 mode0 = &rdev->mode_info.crtcs[0]->base.mode;
495 if (rdev->mode_info.crtcs[1]->base.enabled)
496 mode1 = &rdev->mode_info.crtcs[1]->base.mode;
497
498 rs690_line_buffer_adjust(rdev, mode0, mode1);
499
500 if (rdev->disp_priority == 2) {
501 d1mode_priority_a_cnt = RREG32(R_006548_D1MODE_PRIORITY_A_CNT);
502 d2mode_priority_a_cnt = RREG32(R_006D48_D2MODE_PRIORITY_A_CNT);
503 d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
504 d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
505 WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
506 WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
507 WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
508 WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
509 }
482} 510}
483 511
484uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg) 512uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg)
@@ -598,6 +626,7 @@ int rs600_suspend(struct radeon_device *rdev)
598 626
599void rs600_fini(struct radeon_device *rdev) 627void rs600_fini(struct radeon_device *rdev)
600{ 628{
629 radeon_pm_fini(rdev);
601 r100_cp_fini(rdev); 630 r100_cp_fini(rdev);
602 r100_wb_fini(rdev); 631 r100_wb_fini(rdev);
603 r100_ib_fini(rdev); 632 r100_ib_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rs600d.h b/drivers/gpu/drm/radeon/rs600d.h
index c1c8f5885cbb..e52d2695510b 100644
--- a/drivers/gpu/drm/radeon/rs600d.h
+++ b/drivers/gpu/drm/radeon/rs600d.h
@@ -535,4 +535,57 @@
535#define G_00016C_INVALIDATE_L1_TLB(x) (((x) >> 20) & 0x1) 535#define G_00016C_INVALIDATE_L1_TLB(x) (((x) >> 20) & 0x1)
536#define C_00016C_INVALIDATE_L1_TLB 0xFFEFFFFF 536#define C_00016C_INVALIDATE_L1_TLB 0xFFEFFFFF
537 537
538#define R_006548_D1MODE_PRIORITY_A_CNT 0x006548
539#define S_006548_D1MODE_PRIORITY_MARK_A(x) (((x) & 0x7FFF) << 0)
540#define G_006548_D1MODE_PRIORITY_MARK_A(x) (((x) >> 0) & 0x7FFF)
541#define C_006548_D1MODE_PRIORITY_MARK_A 0xFFFF8000
542#define S_006548_D1MODE_PRIORITY_A_OFF(x) (((x) & 0x1) << 16)
543#define G_006548_D1MODE_PRIORITY_A_OFF(x) (((x) >> 16) & 0x1)
544#define C_006548_D1MODE_PRIORITY_A_OFF 0xFFFEFFFF
545#define S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x) (((x) & 0x1) << 20)
546#define G_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x) (((x) >> 20) & 0x1)
547#define C_006548_D1MODE_PRIORITY_A_ALWAYS_ON 0xFFEFFFFF
548#define S_006548_D1MODE_PRIORITY_A_FORCE_MASK(x) (((x) & 0x1) << 24)
549#define G_006548_D1MODE_PRIORITY_A_FORCE_MASK(x) (((x) >> 24) & 0x1)
550#define C_006548_D1MODE_PRIORITY_A_FORCE_MASK 0xFEFFFFFF
551#define R_00654C_D1MODE_PRIORITY_B_CNT 0x00654C
552#define S_00654C_D1MODE_PRIORITY_MARK_B(x) (((x) & 0x7FFF) << 0)
553#define G_00654C_D1MODE_PRIORITY_MARK_B(x) (((x) >> 0) & 0x7FFF)
554#define C_00654C_D1MODE_PRIORITY_MARK_B 0xFFFF8000
555#define S_00654C_D1MODE_PRIORITY_B_OFF(x) (((x) & 0x1) << 16)
556#define G_00654C_D1MODE_PRIORITY_B_OFF(x) (((x) >> 16) & 0x1)
557#define C_00654C_D1MODE_PRIORITY_B_OFF 0xFFFEFFFF
558#define S_00654C_D1MODE_PRIORITY_B_ALWAYS_ON(x) (((x) & 0x1) << 20)
559#define G_00654C_D1MODE_PRIORITY_B_ALWAYS_ON(x) (((x) >> 20) & 0x1)
560#define C_00654C_D1MODE_PRIORITY_B_ALWAYS_ON 0xFFEFFFFF
561#define S_00654C_D1MODE_PRIORITY_B_FORCE_MASK(x) (((x) & 0x1) << 24)
562#define G_00654C_D1MODE_PRIORITY_B_FORCE_MASK(x) (((x) >> 24) & 0x1)
563#define C_00654C_D1MODE_PRIORITY_B_FORCE_MASK 0xFEFFFFFF
564#define R_006D48_D2MODE_PRIORITY_A_CNT 0x006D48
565#define S_006D48_D2MODE_PRIORITY_MARK_A(x) (((x) & 0x7FFF) << 0)
566#define G_006D48_D2MODE_PRIORITY_MARK_A(x) (((x) >> 0) & 0x7FFF)
567#define C_006D48_D2MODE_PRIORITY_MARK_A 0xFFFF8000
568#define S_006D48_D2MODE_PRIORITY_A_OFF(x) (((x) & 0x1) << 16)
569#define G_006D48_D2MODE_PRIORITY_A_OFF(x) (((x) >> 16) & 0x1)
570#define C_006D48_D2MODE_PRIORITY_A_OFF 0xFFFEFFFF
571#define S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(x) (((x) & 0x1) << 20)
572#define G_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(x) (((x) >> 20) & 0x1)
573#define C_006D48_D2MODE_PRIORITY_A_ALWAYS_ON 0xFFEFFFFF
574#define S_006D48_D2MODE_PRIORITY_A_FORCE_MASK(x) (((x) & 0x1) << 24)
575#define G_006D48_D2MODE_PRIORITY_A_FORCE_MASK(x) (((x) >> 24) & 0x1)
576#define C_006D48_D2MODE_PRIORITY_A_FORCE_MASK 0xFEFFFFFF
577#define R_006D4C_D2MODE_PRIORITY_B_CNT 0x006D4C
578#define S_006D4C_D2MODE_PRIORITY_MARK_B(x) (((x) & 0x7FFF) << 0)
579#define G_006D4C_D2MODE_PRIORITY_MARK_B(x) (((x) >> 0) & 0x7FFF)
580#define C_006D4C_D2MODE_PRIORITY_MARK_B 0xFFFF8000
581#define S_006D4C_D2MODE_PRIORITY_B_OFF(x) (((x) & 0x1) << 16)
582#define G_006D4C_D2MODE_PRIORITY_B_OFF(x) (((x) >> 16) & 0x1)
583#define C_006D4C_D2MODE_PRIORITY_B_OFF 0xFFFEFFFF
584#define S_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON(x) (((x) & 0x1) << 20)
585#define G_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON(x) (((x) >> 20) & 0x1)
586#define C_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON 0xFFEFFFFF
587#define S_006D4C_D2MODE_PRIORITY_B_FORCE_MASK(x) (((x) & 0x1) << 24)
588#define G_006D4C_D2MODE_PRIORITY_B_FORCE_MASK(x) (((x) >> 24) & 0x1)
589#define C_006D4C_D2MODE_PRIORITY_B_FORCE_MASK 0xFEFFFFFF
590
538#endif 591#endif
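[Note on the rs600d.h additions above] Every field follows the driver's S_/G_/C_ convention: S_*(x) shifts a value into the field, G_*(x) extracts it, and C_* is the complement mask used to clear the field before rewriting it, which is exactly how rs600_bandwidth_update() asserts the D*MODE_PRIORITY_*_ALWAYS_ON bits when display priority is forced high. A small self-contained sketch of that read-modify-write idiom, with RREG32/WREG32 reduced to stand-ins for the driver's MMIO accessors:

#include <stdint.h>
#include <stdio.h>

#define S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x)   (((x) & 0x1) << 20)
#define G_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x)   (((x) >> 20) & 0x1)
#define C_006548_D1MODE_PRIORITY_A_ALWAYS_ON      0xFFEFFFFF

static uint32_t fake_reg = 0x00001234;	/* stands in for the MMIO register */
static uint32_t RREG32(uint32_t reg) { (void)reg; return fake_reg; }
static void WREG32(uint32_t reg, uint32_t v) { (void)reg; fake_reg = v; }

int main(void)
{
	/* Read-modify-write: clear the field with C_*, then set it with S_*(1). */
	uint32_t v = RREG32(0x6548);

	v &= C_006548_D1MODE_PRIORITY_A_ALWAYS_ON;
	v |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
	WREG32(0x6548, v);

	printf("always_on = %u\n",
	       (unsigned)G_006548_D1MODE_PRIORITY_A_ALWAYS_ON(RREG32(0x6548)));
	return 0;
}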
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 83b9174f76f2..bbf3da790fd5 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -27,6 +27,7 @@
27 */ 27 */
28#include "drmP.h" 28#include "drmP.h"
29#include "radeon.h" 29#include "radeon.h"
30#include "radeon_asic.h"
30#include "atom.h" 31#include "atom.h"
31#include "rs690d.h" 32#include "rs690d.h"
32 33
@@ -57,42 +58,57 @@ static void rs690_gpu_init(struct radeon_device *rdev)
57 } 58 }
58} 59}
59 60
61union igp_info {
62 struct _ATOM_INTEGRATED_SYSTEM_INFO info;
63 struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_v2;
64};
65
60void rs690_pm_info(struct radeon_device *rdev) 66void rs690_pm_info(struct radeon_device *rdev)
61{ 67{
62 int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo); 68 int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
63 struct _ATOM_INTEGRATED_SYSTEM_INFO *info; 69 union igp_info *info;
64 struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 *info_v2;
65 void *ptr;
66 uint16_t data_offset; 70 uint16_t data_offset;
67 uint8_t frev, crev; 71 uint8_t frev, crev;
68 fixed20_12 tmp; 72 fixed20_12 tmp;
69 73
70 atom_parse_data_header(rdev->mode_info.atom_context, index, NULL, 74 if (atom_parse_data_header(rdev->mode_info.atom_context, index, NULL,
71 &frev, &crev, &data_offset); 75 &frev, &crev, &data_offset)) {
72 ptr = rdev->mode_info.atom_context->bios + data_offset; 76 info = (union igp_info *)(rdev->mode_info.atom_context->bios + data_offset);
73 info = (struct _ATOM_INTEGRATED_SYSTEM_INFO *)ptr; 77
74 info_v2 = (struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 *)ptr; 78 /* Get various system informations from bios */
75 /* Get various system informations from bios */ 79 switch (crev) {
76 switch (crev) { 80 case 1:
77 case 1: 81 tmp.full = rfixed_const(100);
78 tmp.full = rfixed_const(100); 82 rdev->pm.igp_sideport_mclk.full = rfixed_const(info->info.ulBootUpMemoryClock);
79 rdev->pm.igp_sideport_mclk.full = rfixed_const(info->ulBootUpMemoryClock); 83 rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp);
80 rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp); 84 rdev->pm.igp_system_mclk.full = rfixed_const(le16_to_cpu(info->info.usK8MemoryClock));
81 rdev->pm.igp_system_mclk.full = rfixed_const(le16_to_cpu(info->usK8MemoryClock)); 85 rdev->pm.igp_ht_link_clk.full = rfixed_const(le16_to_cpu(info->info.usFSBClock));
82 rdev->pm.igp_ht_link_clk.full = rfixed_const(le16_to_cpu(info->usFSBClock)); 86 rdev->pm.igp_ht_link_width.full = rfixed_const(info->info.ucHTLinkWidth);
83 rdev->pm.igp_ht_link_width.full = rfixed_const(info->ucHTLinkWidth); 87 break;
84 break; 88 case 2:
85 case 2: 89 tmp.full = rfixed_const(100);
86 tmp.full = rfixed_const(100); 90 rdev->pm.igp_sideport_mclk.full = rfixed_const(info->info_v2.ulBootUpSidePortClock);
87 rdev->pm.igp_sideport_mclk.full = rfixed_const(info_v2->ulBootUpSidePortClock); 91 rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp);
88 rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp); 92 rdev->pm.igp_system_mclk.full = rfixed_const(info->info_v2.ulBootUpUMAClock);
89 rdev->pm.igp_system_mclk.full = rfixed_const(info_v2->ulBootUpUMAClock); 93 rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp);
90 rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp); 94 rdev->pm.igp_ht_link_clk.full = rfixed_const(info->info_v2.ulHTLinkFreq);
91 rdev->pm.igp_ht_link_clk.full = rfixed_const(info_v2->ulHTLinkFreq); 95 rdev->pm.igp_ht_link_clk.full = rfixed_div(rdev->pm.igp_ht_link_clk, tmp);
92 rdev->pm.igp_ht_link_clk.full = rfixed_div(rdev->pm.igp_ht_link_clk, tmp); 96 rdev->pm.igp_ht_link_width.full = rfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth));
93 rdev->pm.igp_ht_link_width.full = rfixed_const(le16_to_cpu(info_v2->usMinHTLinkWidth)); 97 break;
94 break; 98 default:
95 default: 99 tmp.full = rfixed_const(100);
100 /* We assume the slower possible clock ie worst case */
101 /* DDR 333Mhz */
102 rdev->pm.igp_sideport_mclk.full = rfixed_const(333);
103 /* FIXME: system clock ? */
104 rdev->pm.igp_system_mclk.full = rfixed_const(100);
105 rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp);
106 rdev->pm.igp_ht_link_clk.full = rfixed_const(200);
107 rdev->pm.igp_ht_link_width.full = rfixed_const(8);
108 DRM_ERROR("No integrated system info for your GPU, using safe default\n");
109 break;
110 }
111 } else {
96 tmp.full = rfixed_const(100); 112 tmp.full = rfixed_const(100);
97 /* We assume the slower possible clock ie worst case */ 113 /* We assume the slower possible clock ie worst case */
98 /* DDR 333Mhz */ 114 /* DDR 333Mhz */
@@ -103,7 +119,6 @@ void rs690_pm_info(struct radeon_device *rdev)
103 rdev->pm.igp_ht_link_clk.full = rfixed_const(200); 119 rdev->pm.igp_ht_link_clk.full = rfixed_const(200);
104 rdev->pm.igp_ht_link_width.full = rfixed_const(8); 120 rdev->pm.igp_ht_link_width.full = rfixed_const(8);
105 DRM_ERROR("No integrated system info for your GPU, using safe default\n"); 121 DRM_ERROR("No integrated system info for your GPU, using safe default\n");
106 break;
107 } 122 }
108 /* Compute various bandwidth */ 123 /* Compute various bandwidth */
109 /* k8_bandwidth = (memory_clk / 2) * 2 * 8 * 0.5 = memory_clk * 4 */ 124 /* k8_bandwidth = (memory_clk / 2) * 2 * 8 * 0.5 = memory_clk * 4 */
@@ -131,7 +146,6 @@ void rs690_pm_info(struct radeon_device *rdev)
131 146
132void rs690_mc_init(struct radeon_device *rdev) 147void rs690_mc_init(struct radeon_device *rdev)
133{ 148{
134 fixed20_12 a;
135 u64 base; 149 u64 base;
136 150
137 rs400_gart_adjust_size(rdev); 151 rs400_gart_adjust_size(rdev);
@@ -145,18 +159,10 @@ void rs690_mc_init(struct radeon_device *rdev)
145 base = RREG32_MC(R_000100_MCCFG_FB_LOCATION); 159 base = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
146 base = G_000100_MC_FB_START(base) << 16; 160 base = G_000100_MC_FB_START(base) << 16;
147 rs690_pm_info(rdev); 161 rs690_pm_info(rdev);
148 /* FIXME: we should enforce default clock in case GPU is not in
149 * default setup
150 */
151 a.full = rfixed_const(100);
152 rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
153 rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
154 a.full = rfixed_const(16);
155 /* core_bandwidth = sclk(Mhz) * 16 */
156 rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a);
157 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); 162 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
158 radeon_vram_location(rdev, &rdev->mc, base); 163 radeon_vram_location(rdev, &rdev->mc, base);
159 radeon_gtt_location(rdev, &rdev->mc); 164 radeon_gtt_location(rdev, &rdev->mc);
165 radeon_update_bandwidth_info(rdev);
160} 166}
161 167
162void rs690_line_buffer_adjust(struct radeon_device *rdev, 168void rs690_line_buffer_adjust(struct radeon_device *rdev,
@@ -394,10 +400,12 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
394 struct drm_display_mode *mode1 = NULL; 400 struct drm_display_mode *mode1 = NULL;
395 struct rs690_watermark wm0; 401 struct rs690_watermark wm0;
396 struct rs690_watermark wm1; 402 struct rs690_watermark wm1;
397 u32 tmp; 403 u32 tmp, d1mode_priority_a_cnt, d2mode_priority_a_cnt;
398 fixed20_12 priority_mark02, priority_mark12, fill_rate; 404 fixed20_12 priority_mark02, priority_mark12, fill_rate;
399 fixed20_12 a, b; 405 fixed20_12 a, b;
400 406
407 radeon_update_display_priority(rdev);
408
401 if (rdev->mode_info.crtcs[0]->base.enabled) 409 if (rdev->mode_info.crtcs[0]->base.enabled)
402 mode0 = &rdev->mode_info.crtcs[0]->base.mode; 410 mode0 = &rdev->mode_info.crtcs[0]->base.mode;
403 if (rdev->mode_info.crtcs[1]->base.enabled) 411 if (rdev->mode_info.crtcs[1]->base.enabled)
@@ -407,7 +415,8 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
407 * modes if the user specifies HIGH for displaypriority 415 * modes if the user specifies HIGH for displaypriority
408 * option. 416 * option.
409 */ 417 */
410 if (rdev->disp_priority == 2) { 418 if ((rdev->disp_priority == 2) &&
419 ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740))) {
411 tmp = RREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER); 420 tmp = RREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER);
412 tmp &= C_000104_MC_DISP0R_INIT_LAT; 421 tmp &= C_000104_MC_DISP0R_INIT_LAT;
413 tmp &= C_000104_MC_DISP1R_INIT_LAT; 422 tmp &= C_000104_MC_DISP1R_INIT_LAT;
@@ -482,10 +491,16 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
482 priority_mark12.full = 0; 491 priority_mark12.full = 0;
483 if (wm1.priority_mark_max.full > priority_mark12.full) 492 if (wm1.priority_mark_max.full > priority_mark12.full)
484 priority_mark12.full = wm1.priority_mark_max.full; 493 priority_mark12.full = wm1.priority_mark_max.full;
485 WREG32(R_006548_D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); 494 d1mode_priority_a_cnt = rfixed_trunc(priority_mark02);
486 WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); 495 d2mode_priority_a_cnt = rfixed_trunc(priority_mark12);
487 WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12)); 496 if (rdev->disp_priority == 2) {
488 WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12)); 497 d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
498 d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
499 }
500 WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
501 WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
502 WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
503 WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
489 } else if (mode0) { 504 } else if (mode0) {
490 if (rfixed_trunc(wm0.dbpp) > 64) 505 if (rfixed_trunc(wm0.dbpp) > 64)
491 a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair); 506 a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair);
@@ -512,8 +527,11 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
512 priority_mark02.full = 0; 527 priority_mark02.full = 0;
513 if (wm0.priority_mark_max.full > priority_mark02.full) 528 if (wm0.priority_mark_max.full > priority_mark02.full)
514 priority_mark02.full = wm0.priority_mark_max.full; 529 priority_mark02.full = wm0.priority_mark_max.full;
515 WREG32(R_006548_D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); 530 d1mode_priority_a_cnt = rfixed_trunc(priority_mark02);
516 WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); 531 if (rdev->disp_priority == 2)
532 d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
533 WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
534 WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
517 WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, 535 WREG32(R_006D48_D2MODE_PRIORITY_A_CNT,
518 S_006D48_D2MODE_PRIORITY_A_OFF(1)); 536 S_006D48_D2MODE_PRIORITY_A_OFF(1));
519 WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, 537 WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT,
@@ -544,12 +562,15 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
 priority_mark12.full = 0;
 if (wm1.priority_mark_max.full > priority_mark12.full)
 priority_mark12.full = wm1.priority_mark_max.full;
+d2mode_priority_a_cnt = rfixed_trunc(priority_mark12);
+if (rdev->disp_priority == 2)
+d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
 WREG32(R_006548_D1MODE_PRIORITY_A_CNT,
 S_006548_D1MODE_PRIORITY_A_OFF(1));
 WREG32(R_00654C_D1MODE_PRIORITY_B_CNT,
 S_00654C_D1MODE_PRIORITY_B_OFF(1));
-WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
-WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
+WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
+WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
 }
 }

@@ -657,6 +678,7 @@ int rs690_suspend(struct radeon_device *rdev)

 void rs690_fini(struct radeon_device *rdev)
 {
+radeon_pm_fini(rdev);
 r100_cp_fini(rdev);
 r100_wb_fini(rdev);
 r100_ib_fini(rdev);
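The rs690.c hunks above (and the matching rv515.c hunks later in this diff) all apply one pattern: the watermark-derived value is first staged in a local d1mode_priority_a_cnt / d2mode_priority_a_cnt variable, and when the user has selected the high display-priority option (rdev->disp_priority == 2) the new *_ALWAYS_ON field is OR-ed in before the registers are written, which appears to make the display controller's requests take priority unconditionally rather than only once the watermark is exceeded. A condensed sketch of the pattern, using only identifiers that appear in the hunks above (a summary, not the literal patched function body):

	d1mode_priority_a_cnt = rfixed_trunc(priority_mark02);
	if (rdev->disp_priority == 2)	/* user asked for high display priority */
		d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
	WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
	WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);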
diff --git a/drivers/gpu/drm/radeon/rs690d.h b/drivers/gpu/drm/radeon/rs690d.h
index 62d31e7a897f..36e6398a98ae 100644
--- a/drivers/gpu/drm/radeon/rs690d.h
+++ b/drivers/gpu/drm/radeon/rs690d.h
@@ -182,6 +182,9 @@
 #define S_006548_D1MODE_PRIORITY_A_OFF(x) (((x) & 0x1) << 16)
 #define G_006548_D1MODE_PRIORITY_A_OFF(x) (((x) >> 16) & 0x1)
 #define C_006548_D1MODE_PRIORITY_A_OFF 0xFFFEFFFF
+#define S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x) (((x) & 0x1) << 20)
+#define G_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x) (((x) >> 20) & 0x1)
+#define C_006548_D1MODE_PRIORITY_A_ALWAYS_ON 0xFFEFFFFF
 #define S_006548_D1MODE_PRIORITY_A_FORCE_MASK(x) (((x) & 0x1) << 24)
 #define G_006548_D1MODE_PRIORITY_A_FORCE_MASK(x) (((x) >> 24) & 0x1)
 #define C_006548_D1MODE_PRIORITY_A_FORCE_MASK 0xFEFFFFFF
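The new defines follow the rs690d.h convention of S_/G_/C_ triplets: S_* shifts a field value into position, G_* extracts it from a register value, and C_* is the complement mask used to clear the field. A small hypothetical read-modify-write using the helpers added above (this sequence itself is not part of the patch):

	u32 cnt = RREG32(R_006548_D1MODE_PRIORITY_A_CNT);

	cnt &= C_006548_D1MODE_PRIORITY_A_ALWAYS_ON;	/* clear bit 20 */
	cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);	/* set bit 20 */
	if (G_006548_D1MODE_PRIORITY_A_ALWAYS_ON(cnt))	/* now reads back as 1 */
		WREG32(R_006548_D1MODE_PRIORITY_A_CNT, cnt);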
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index bea747da123f..9035121f4b58 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -26,9 +26,11 @@
  * Jerome Glisse
  */
 #include <linux/seq_file.h>
+#include <linux/slab.h>
 #include "drmP.h"
 #include "rv515d.h"
 #include "radeon.h"
+#include "radeon_asic.h"
 #include "atom.h"
 #include "rv515_reg_safe.h"

@@ -279,19 +281,13 @@ static void rv515_vram_get_type(struct radeon_device *rdev)

 void rv515_mc_init(struct radeon_device *rdev)
 {
-fixed20_12 a;

 rv515_vram_get_type(rdev);
 r100_vram_init_sizes(rdev);
 radeon_vram_location(rdev, &rdev->mc, 0);
 if (!(rdev->flags & RADEON_IS_AGP))
 radeon_gtt_location(rdev, &rdev->mc);
-/* FIXME: we should enforce default clock in case GPU is not in
- * default setup
- */
-a.full = rfixed_const(100);
-rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
-rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
+radeon_update_bandwidth_info(rdev);
 }

 uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg)
@@ -539,6 +535,7 @@ void rv515_set_safe_registers(struct radeon_device *rdev)

 void rv515_fini(struct radeon_device *rdev)
 {
+radeon_pm_fini(rdev);
 r100_cp_fini(rdev);
 r100_wb_fini(rdev);
 r100_ib_fini(rdev);
@@ -1020,7 +1017,7 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
 struct drm_display_mode *mode1 = NULL;
 struct rv515_watermark wm0;
 struct rv515_watermark wm1;
-u32 tmp;
+u32 tmp, d1mode_priority_a_cnt, d2mode_priority_a_cnt;
 fixed20_12 priority_mark02, priority_mark12, fill_rate;
 fixed20_12 a, b;

@@ -1088,10 +1085,16 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
 priority_mark12.full = 0;
 if (wm1.priority_mark_max.full > priority_mark12.full)
 priority_mark12.full = wm1.priority_mark_max.full;
-WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
-WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
-WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
-WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
+d1mode_priority_a_cnt = rfixed_trunc(priority_mark02);
+d2mode_priority_a_cnt = rfixed_trunc(priority_mark12);
+if (rdev->disp_priority == 2) {
+d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
+d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
+}
+WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
+WREG32(D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
+WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
+WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
 } else if (mode0) {
 if (rfixed_trunc(wm0.dbpp) > 64)
 a.full = rfixed_div(wm0.dbpp, wm0.num_line_pair);
@@ -1118,8 +1121,11 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
 priority_mark02.full = 0;
 if (wm0.priority_mark_max.full > priority_mark02.full)
 priority_mark02.full = wm0.priority_mark_max.full;
-WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
-WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
+d1mode_priority_a_cnt = rfixed_trunc(priority_mark02);
+if (rdev->disp_priority == 2)
+d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
+WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
+WREG32(D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
 WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
 WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF);
 } else {
@@ -1148,10 +1154,13 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
 priority_mark12.full = 0;
 if (wm1.priority_mark_max.full > priority_mark12.full)
 priority_mark12.full = wm1.priority_mark_max.full;
+d2mode_priority_a_cnt = rfixed_trunc(priority_mark12);
+if (rdev->disp_priority == 2)
+d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
 WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
 WREG32(D1MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF);
-WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
-WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
+WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
+WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
 }
 }

@@ -1161,6 +1170,8 @@ void rv515_bandwidth_update(struct radeon_device *rdev)
 struct drm_display_mode *mode0 = NULL;
 struct drm_display_mode *mode1 = NULL;

+radeon_update_display_priority(rdev);
+
 if (rdev->mode_info.crtcs[0]->base.enabled)
 mode0 = &rdev->mode_info.crtcs[0]->base.mode;
 if (rdev->mode_info.crtcs[1]->base.enabled)
@@ -1170,7 +1181,8 @@ void rv515_bandwidth_update(struct radeon_device *rdev)
  * modes if the user specifies HIGH for displaypriority
  * option.
  */
-if (rdev->disp_priority == 2) {
+if ((rdev->disp_priority == 2) &&
+(rdev->family == CHIP_RV515)) {
 tmp = RREG32_MC(MC_MISC_LAT_TIMER);
 tmp &= ~MC_DISP1R_INIT_LAT_MASK;
 tmp &= ~MC_DISP0R_INIT_LAT_MASK;
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 37887dee12af..97958a64df1a 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -27,8 +27,10 @@
  */
 #include <linux/firmware.h>
 #include <linux/platform_device.h>
+#include <linux/slab.h>
 #include "drmP.h"
 #include "radeon.h"
+#include "radeon_asic.h"
 #include "radeon_drm.h"
 #include "rv770d.h"
 #include "atom.h"
@@ -125,9 +127,9 @@ void rv770_pcie_gart_disable(struct radeon_device *rdev)

 void rv770_pcie_gart_fini(struct radeon_device *rdev)
 {
+radeon_gart_fini(rdev);
 rv770_pcie_gart_disable(rdev);
 radeon_gart_table_vram_free(rdev);
-radeon_gart_fini(rdev);
 }


@@ -647,10 +649,13 @@ static void rv770_gpu_init(struct radeon_device *rdev)

 WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
 WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
+WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
 WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);

 WREG32(CGTS_SYS_TCC_DISABLE, 0);
 WREG32(CGTS_TCC_DISABLE, 0);
+WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
+WREG32(CGTS_USER_TCC_DISABLE, 0);

 num_qd_pipes =
 R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
@@ -864,7 +869,6 @@ static void rv770_gpu_init(struct radeon_device *rdev)

 int rv770_mc_init(struct radeon_device *rdev)
 {
-fixed20_12 a;
 u32 tmp;
 int chansize, numchan;

@@ -908,12 +912,8 @@ int rv770_mc_init(struct radeon_device *rdev)
 rdev->mc.real_vram_size = rdev->mc.aper_size;
 }
 r600_vram_gtt_location(rdev, &rdev->mc);
-/* FIXME: we should enforce default clock in case GPU is not in
- * default setup
- */
-a.full = rfixed_const(100);
-rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
-rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
+radeon_update_bandwidth_info(rdev);
+
 return 0;
 }

@@ -1013,6 +1013,13 @@ int rv770_resume(struct radeon_device *rdev)
 DRM_ERROR("radeon: failled testing IB (%d).\n", r);
 return r;
 }
+
+r = r600_audio_init(rdev);
+if (r) {
+dev_err(rdev->dev, "radeon: audio init failed\n");
+return r;
+}
+
 return r;

 }
@@ -1021,6 +1028,7 @@ int rv770_suspend(struct radeon_device *rdev)
 {
 int r;

+r600_audio_fini(rdev);
 /* FIXME: we should wait for ring to be empty */
 r700_cp_stop(rdev);
 rdev->cp.ready = false;
@@ -1144,11 +1152,19 @@ int rv770_init(struct radeon_device *rdev)
 }
 }
 }
+
+r = r600_audio_init(rdev);
+if (r) {
+dev_err(rdev->dev, "radeon: audio init failed\n");
+return r;
+}
+
 return 0;
 }

 void rv770_fini(struct radeon_device *rdev)
 {
+radeon_pm_fini(rdev);
 r600_blit_fini(rdev);
 r600_cp_fini(rdev);
 r600_wb_fini(rdev);
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index 4648ed2f0143..4bf69c404491 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -35,6 +35,7 @@
35#include "ttm/ttm_placement.h" 35#include "ttm/ttm_placement.h"
36#include <linux/agp_backend.h> 36#include <linux/agp_backend.h>
37#include <linux/module.h> 37#include <linux/module.h>
38#include <linux/slab.h>
38#include <linux/io.h> 39#include <linux/io.h>
39#include <asm/agp.h> 40#include <asm/agp.h>
40 41
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 89c38c49066f..dd47b2a9a791 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1425,8 +1425,8 @@ int ttm_bo_global_init(struct ttm_global_reference *ref)

 atomic_set(&glob->bo_count, 0);

-kobject_init(&glob->kobj, &ttm_bo_glob_kobj_type);
-ret = kobject_add(&glob->kobj, ttm_get_kobj(), "buffer_objects");
+ret = kobject_init_and_add(
+&glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
 if (unlikely(ret != 0))
 kobject_put(&glob->kobj);
 return ret;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 5ca37a58a98c..d764e82e799b 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -33,6 +33,7 @@
 #include <linux/io.h>
 #include <linux/highmem.h>
 #include <linux/wait.h>
+#include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/module.h>

diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index eb143e04d402..801b702566e6 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -32,6 +32,7 @@
 #include <linux/wait.h>
 #include <linux/mm.h>
 #include <linux/module.h>
+#include <linux/slab.h>

 #define TTM_MEMORY_ALLOC_RETRIES 4

@@ -260,8 +261,8 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
 zone->used_mem = 0;
 zone->glob = glob;
 glob->zone_kernel = zone;
-kobject_init(&zone->kobj, &ttm_mem_zone_kobj_type);
-ret = kobject_add(&zone->kobj, &glob->kobj, zone->name);
+ret = kobject_init_and_add(
+&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
 if (unlikely(ret != 0)) {
 kobject_put(&zone->kobj);
 return ret;
@@ -296,8 +297,8 @@ static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob,
 zone->used_mem = 0;
 zone->glob = glob;
 glob->zone_highmem = zone;
-kobject_init(&zone->kobj, &ttm_mem_zone_kobj_type);
-ret = kobject_add(&zone->kobj, &glob->kobj, zone->name);
+ret = kobject_init_and_add(
+&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
 if (unlikely(ret != 0)) {
 kobject_put(&zone->kobj);
 return ret;
@@ -343,8 +344,8 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
 zone->used_mem = 0;
 zone->glob = glob;
 glob->zone_dma32 = zone;
-kobject_init(&zone->kobj, &ttm_mem_zone_kobj_type);
-ret = kobject_add(&zone->kobj, &glob->kobj, zone->name);
+ret = kobject_init_and_add(
+&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
 if (unlikely(ret != 0)) {
 kobject_put(&zone->kobj);
 return ret;
@@ -365,10 +366,8 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
 glob->swap_queue = create_singlethread_workqueue("ttm_swap");
 INIT_WORK(&glob->work, ttm_shrink_work);
 init_waitqueue_head(&glob->queue);
-kobject_init(&glob->kobj, &ttm_mem_glob_kobj_type);
-ret = kobject_add(&glob->kobj,
-ttm_get_kobj(),
-"memory_accounting");
+ret = kobject_init_and_add(
+&glob->kobj, &ttm_mem_glob_kobj_type, ttm_get_kobj(), "memory_accounting");
 if (unlikely(ret != 0)) {
 kobject_put(&glob->kobj);
 return ret;
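The ttm_bo.c hunk and the four ttm_memory.c hunks above all make the same substitution: the two-step kobject_init() + kobject_add() sequence becomes a single kobject_init_and_add() call. The error handling is unchanged; on failure kobject_put() drops the initial reference taken at initialisation, so the ktype's release callback still runs. Consolidated, the shape adopted in these hunks is:

	ret = kobject_init_and_add(
		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
	if (unlikely(ret != 0)) {
		kobject_put(&zone->kobj);
		return ret;
	}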
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index a759170763bb..d5fd5b8faeb3 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -28,13 +28,14 @@
  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
  */

-#include <linux/vmalloc.h>
 #include <linux/sched.h>
 #include <linux/highmem.h>
 #include <linux/pagemap.h>
 #include <linux/file.h>
 #include <linux/swap.h>
+#include <linux/slab.h>
 #include "drm_cache.h"
+#include "drm_mem_util.h"
 #include "ttm/ttm_module.h"
 #include "ttm/ttm_bo_driver.h"
 #include "ttm/ttm_placement.h"
@@ -43,32 +44,15 @@ static int ttm_tt_swapin(struct ttm_tt *ttm);

 /**
  * Allocates storage for pointers to the pages that back the ttm.
- *
- * Uses kmalloc if possible. Otherwise falls back to vmalloc.
  */
 static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
 {
-unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
-ttm->pages = NULL;
-
-if (size <= PAGE_SIZE)
-ttm->pages = kzalloc(size, GFP_KERNEL);
-
-if (!ttm->pages) {
-ttm->pages = vmalloc_user(size);
-if (ttm->pages)
-ttm->page_flags |= TTM_PAGE_FLAG_VMALLOC;
-}
+ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(*ttm->pages));
 }

 static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
 {
-if (ttm->page_flags & TTM_PAGE_FLAG_VMALLOC) {
-vfree(ttm->pages);
-ttm->page_flags &= ~TTM_PAGE_FLAG_VMALLOC;
-} else {
-kfree(ttm->pages);
-}
+drm_free_large(ttm->pages);
 ttm->pages = NULL;
 }

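The ttm_tt.c change drops the open-coded kmalloc-or-vmalloc page-directory allocation in favour of drm_calloc_large()/drm_free_large() from the newly included drm_mem_util.h. Those helpers keep the same small-allocation-goes-to-kmalloc, large-allocation-goes-to-vmalloc policy, but the free side can distinguish the two cases with is_vmalloc_addr(), which is why the TTM_PAGE_FLAG_VMALLOC bookkeeping disappears. Roughly, as a from-memory sketch rather than a copy of the header:

static inline void *drm_calloc_large(size_t nmemb, size_t size)
{
	/* small requests stay on the slab allocator ... */
	if (size * nmemb <= PAGE_SIZE)
		return kcalloc(nmemb, size, GFP_KERNEL);
	/* ... larger ones fall back to zeroed vmalloc space */
	return __vmalloc(size * nmemb,
			 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
}

static inline void drm_free_large(void *ptr)
{
	if (!is_vmalloc_addr(ptr))
		kfree(ptr);
	else
		vfree(ptr);
}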
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
index 327380888b4a..4c54f043068e 100644
--- a/drivers/gpu/drm/via/via_dmablit.c
+++ b/drivers/gpu/drm/via/via_dmablit.c
@@ -40,6 +40,7 @@
40#include "via_dmablit.h" 40#include "via_dmablit.h"
41 41
42#include <linux/pagemap.h> 42#include <linux/pagemap.h>
43#include <linux/slab.h>
43 44
44#define VIA_PGDN(x) (((unsigned long)(x)) & PAGE_MASK) 45#define VIA_PGDN(x) (((unsigned long)(x)) & PAGE_MASK)
45#define VIA_PGOFF(x) (((unsigned long)(x)) & ~PAGE_MASK) 46#define VIA_PGOFF(x) (((unsigned long)(x)) & ~PAGE_MASK)
diff --git a/drivers/gpu/drm/via/via_video.c b/drivers/gpu/drm/via/via_video.c
index 6ec04ac12459..6efac8117c93 100644
--- a/drivers/gpu/drm/via/via_video.c
+++ b/drivers/gpu/drm/via/via_video.c
@@ -75,7 +75,7 @@ int via_decoder_futex(struct drm_device *dev, void *data, struct drm_file *file_

 DRM_DEBUG("\n");

-if (fx->lock > VIA_NR_XVMC_LOCKS)
+if (fx->lock >= VIA_NR_XVMC_LOCKS)
 return -EFAULT;

 lock = (volatile int *)XVMCLOCKPTR(sAPriv, fx->lock);
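The via_video.c hunk is an off-by-one bounds-check fix: fx->lock is used as an index into an array of VIA_NR_XVMC_LOCKS lock slots, so an index equal to VIA_NR_XVMC_LOCKS is already one past the end and must be rejected along with larger values, hence '>' becomes '>='. Schematically (the array declaration below is illustrative, not taken from the driver):

	volatile int locks[VIA_NR_XVMC_LOCKS];	/* valid indices: 0 .. VIA_NR_XVMC_LOCKS - 1 */

	if (fx->lock >= VIA_NR_XVMC_LOCKS)	/* VIA_NR_XVMC_LOCKS itself is out of range too */
		return -EFAULT;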
diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig
index f20b8bcbef39..30ad13344f7b 100644
--- a/drivers/gpu/drm/vmwgfx/Kconfig
+++ b/drivers/gpu/drm/vmwgfx/Kconfig
@@ -1,6 +1,6 @@
 config DRM_VMWGFX
 tristate "DRM driver for VMware Virtual GPU"
-depends on DRM && PCI
+depends on DRM && PCI && FB
 select FB_DEFERRED_IO
 select FB_CFB_FILLRECT
 select FB_CFB_COPYAREA