author    Linus Torvalds <torvalds@linux-foundation.org>	2009-12-11 00:56:47 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>	2009-12-11 00:56:47 -0500
commit    3ef884b4c04e857c283cc77ca70ad8f638d94b0e (patch)
tree      c8c5b872e836e6ffe8bd08ab3477f9e8260575ed /drivers/gpu
parent    4e5df8069b0e4e36c6b528b3be7da298e6f454cd (diff)
parent    4361e52ad0372e6fd2240a2207b49a4de1f45ca9 (diff)
Merge branch 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6
* 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (189 commits)
  drm/radeon/kms: fix warning about cur_placement being uninitialised.
  drm/ttm: Print debug information on memory manager when eviction fails
  drm: Add memory manager debug function
  drm/radeon/kms: restore surface registers on resume.
  drm/radeon/kms/r600/r700: fallback gracefully on ucode failure
  drm/ttm: Initialize eviction placement in case the driver callback doesn't
  drm/radeon/kms: cleanup structure and module if initialization fails
  drm/radeon/kms: actualy set the eviction placements we choose
  drm/radeon/kms: Fix NULL ptr dereference
  drm/radeon/kms/avivo: add support for new pll selection algo
  drm/radeon/kms/avivo: fix some bugs in the display bandwidth setup
  drm/radeon/kms: fix return value from fence function.
  drm/radeon: Remove tests for -ERESTART from the TTM code.
  drm/ttm: Have the TTM code return -ERESTARTSYS instead of -ERESTART.
  drm/radeon/kms: Convert radeon to new TTM validation API (V2)
  drm/ttm: Rework validation & memory space allocation (V3)
  drm: Add search/get functions to get a block in a specific range
  drm/radeon/kms: fix avivo tiling regression since radeon object rework
  drm/i915: Remove a debugging printk from hangcheck
  drm/radeon/kms: make sure i2c id matches
  ...
Diffstat (limited to 'drivers/gpu')
-rw-r--r-- drivers/gpu/drm/Makefile | 2
-rw-r--r-- drivers/gpu/drm/drm_crtc.c | 176
-rw-r--r-- drivers/gpu/drm/drm_crtc_helper.c | 5
-rw-r--r-- drivers/gpu/drm/drm_dp_i2c_helper.c (renamed from drivers/gpu/drm/i915/intel_dp_i2c.c) | 76
-rw-r--r-- drivers/gpu/drm/drm_drv.c | 42
-rw-r--r-- drivers/gpu/drm/drm_edid.c | 328
-rw-r--r-- drivers/gpu/drm/drm_fb_helper.c | 23
-rw-r--r-- drivers/gpu/drm/drm_fops.c | 112
-rw-r--r-- drivers/gpu/drm/drm_irq.c | 130
-rw-r--r-- drivers/gpu/drm/drm_mm.c | 110
-rw-r--r-- drivers/gpu/drm/drm_modes.c | 28
-rw-r--r-- drivers/gpu/drm/drm_stub.c | 15
-rw-r--r-- drivers/gpu/drm/i915/Makefile | 2
-rw-r--r-- drivers/gpu/drm/i915/dvo_ch7017.c | 9
-rw-r--r-- drivers/gpu/drm/i915/dvo_ch7xxx.c | 16
-rw-r--r-- drivers/gpu/drm/i915/dvo_ivch.c | 37
-rw-r--r-- drivers/gpu/drm/i915/dvo_sil164.c | 20
-rw-r--r-- drivers/gpu/drm/i915/dvo_tfp410.c | 34
-rw-r--r-- drivers/gpu/drm/i915/i915_debugfs.c | 120
-rw-r--r-- drivers/gpu/drm/i915/i915_dma.c | 40
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.c | 1
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.h | 80
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.c | 114
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_tiling.c | 6
-rw-r--r-- drivers/gpu/drm/i915/i915_irq.c | 163
-rw-r--r-- drivers/gpu/drm/i915/i915_opregion.c | 92
-rw-r--r-- drivers/gpu/drm/i915/i915_reg.h | 71
-rw-r--r-- drivers/gpu/drm/i915/i915_suspend.c | 86
-rw-r--r-- drivers/gpu/drm/i915/intel_bios.c | 137
-rw-r--r-- drivers/gpu/drm/i915/intel_bios.h | 17
-rw-r--r-- drivers/gpu/drm/i915/intel_crt.c | 50
-rw-r--r-- drivers/gpu/drm/i915/intel_display.c | 1036
-rw-r--r-- drivers/gpu/drm/i915/intel_dp.c | 162
-rw-r--r-- drivers/gpu/drm/i915/intel_dp.h | 144
-rw-r--r-- drivers/gpu/drm/i915/intel_drv.h | 44
-rw-r--r-- drivers/gpu/drm/i915/intel_fb.c | 7
-rw-r--r-- drivers/gpu/drm/i915/intel_hdmi.c | 55
-rw-r--r-- drivers/gpu/drm/i915/intel_i2c.c | 21
-rw-r--r-- drivers/gpu/drm/i915/intel_lvds.c | 140
-rw-r--r-- drivers/gpu/drm/i915/intel_overlay.c | 1416
-rw-r--r-- drivers/gpu/drm/i915/intel_sdvo.c | 14
-rw-r--r-- drivers/gpu/drm/i915/intel_tv.c | 58
-rw-r--r-- drivers/gpu/drm/radeon/Makefile | 2
-rw-r--r-- drivers/gpu/drm/radeon/atom.c | 33
-rw-r--r-- drivers/gpu/drm/radeon/atom.h | 2
-rw-r--r-- drivers/gpu/drm/radeon/atombios.h | 2
-rw-r--r-- drivers/gpu/drm/radeon/atombios_crtc.c | 59
-rw-r--r-- drivers/gpu/drm/radeon/atombios_dp.c | 790
-rw-r--r-- drivers/gpu/drm/radeon/r100.c | 245
-rw-r--r-- drivers/gpu/drm/radeon/r100_track.h | 10
-rw-r--r-- drivers/gpu/drm/radeon/r300.c | 33
-rw-r--r-- drivers/gpu/drm/radeon/r420.c | 25
-rw-r--r-- drivers/gpu/drm/radeon/r500_reg.h | 2
-rw-r--r-- drivers/gpu/drm/radeon/r520.c | 8
-rw-r--r-- drivers/gpu/drm/radeon/r600.c | 1147
-rw-r--r-- drivers/gpu/drm/radeon/r600_blit_kms.c | 34
-rw-r--r-- drivers/gpu/drm/radeon/r600d.h | 212
-rw-r--r-- drivers/gpu/drm/radeon/radeon.h | 165
-rw-r--r-- drivers/gpu/drm/radeon/radeon_asic.h | 70
-rw-r--r-- drivers/gpu/drm/radeon/radeon_atombios.c | 332
-rw-r--r-- drivers/gpu/drm/radeon/radeon_benchmark.c | 36
-rw-r--r-- drivers/gpu/drm/radeon/radeon_clocks.c | 23
-rw-r--r-- drivers/gpu/drm/radeon/radeon_combios.c | 688
-rw-r--r-- drivers/gpu/drm/radeon/radeon_connectors.c | 194
-rw-r--r-- drivers/gpu/drm/radeon/radeon_cp.c | 45
-rw-r--r-- drivers/gpu/drm/radeon/radeon_cs.c | 13
-rw-r--r-- drivers/gpu/drm/radeon/radeon_device.c | 62
-rw-r--r-- drivers/gpu/drm/radeon/radeon_display.c | 145
-rw-r--r-- drivers/gpu/drm/radeon/radeon_drv.c | 4
-rw-r--r-- drivers/gpu/drm/radeon/radeon_drv.h | 1
-rw-r--r-- drivers/gpu/drm/radeon/radeon_encoders.c | 276
-rw-r--r-- drivers/gpu/drm/radeon/radeon_fb.c | 72
-rw-r--r-- drivers/gpu/drm/radeon/radeon_fence.c | 47
-rw-r--r-- drivers/gpu/drm/radeon/radeon_fixed.h | 17
-rw-r--r-- drivers/gpu/drm/radeon/radeon_gart.c | 42
-rw-r--r-- drivers/gpu/drm/radeon/radeon_gem.c | 104
-rw-r--r-- drivers/gpu/drm/radeon/radeon_i2c.c | 182
-rw-r--r-- drivers/gpu/drm/radeon/radeon_irq_kms.c | 61
-rw-r--r-- drivers/gpu/drm/radeon/radeon_kms.c | 42
-rw-r--r-- drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 104
-rw-r--r-- drivers/gpu/drm/radeon/radeon_legacy_encoders.c | 125
-rw-r--r-- drivers/gpu/drm/radeon/radeon_mode.h | 149
-rw-r--r-- drivers/gpu/drm/radeon/radeon_object.c | 560
-rw-r--r-- drivers/gpu/drm/radeon/radeon_object.h | 157
-rw-r--r-- drivers/gpu/drm/radeon/radeon_pm.c | 6
-rw-r--r-- drivers/gpu/drm/radeon/radeon_reg.h | 60
-rw-r--r-- drivers/gpu/drm/radeon/radeon_ring.c | 67
-rw-r--r-- drivers/gpu/drm/radeon/radeon_test.c | 55
-rw-r--r-- drivers/gpu/drm/radeon/radeon_ttm.c | 94
-rw-r--r-- drivers/gpu/drm/radeon/rs400.c | 17
-rw-r--r-- drivers/gpu/drm/radeon/rs600.c | 236
-rw-r--r-- drivers/gpu/drm/radeon/rs600d.h | 112
-rw-r--r-- drivers/gpu/drm/radeon/rs690.c | 57
-rw-r--r-- drivers/gpu/drm/radeon/rv515.c | 24
-rw-r--r-- drivers/gpu/drm/radeon/rv770.c | 79
-rw-r--r-- drivers/gpu/drm/ttm/Makefile | 3
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo.c | 545
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo_util.c | 1
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo_vm.c | 7
-rw-r--r-- drivers/gpu/drm/ttm/ttm_execbuf_util.c | 117
-rw-r--r-- drivers/gpu/drm/ttm/ttm_lock.c | 311
-rw-r--r-- drivers/gpu/drm/ttm/ttm_memory.c | 16
-rw-r--r-- drivers/gpu/drm/ttm/ttm_object.c | 452
-rw-r--r-- drivers/gpu/drm/ttm/ttm_tt.c | 1
104 files changed, 10895 insertions, 2924 deletions
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 3c8827a7aabd..91567ac806f1 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -15,7 +15,7 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
 
 drm-$(CONFIG_COMPAT) += drm_ioc32.o
 
-drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o
+drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_i2c_helper.o
 
 obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
 
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 3f7c500b2115..5124401f266a 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -125,6 +125,15 @@ static struct drm_prop_enum_list drm_tv_subconnector_enum_list[] =
 DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
		 drm_tv_subconnector_enum_list)
 
+static struct drm_prop_enum_list drm_dirty_info_enum_list[] = {
+	{ DRM_MODE_DIRTY_OFF,      "Off"      },
+	{ DRM_MODE_DIRTY_ON,       "On"       },
+	{ DRM_MODE_DIRTY_ANNOTATE, "Annotate" },
+};
+
+DRM_ENUM_NAME_FN(drm_get_dirty_info_name,
+		 drm_dirty_info_enum_list)
+
 struct drm_conn_prop_enum_list {
 	int type;
 	char *name;
@@ -247,7 +256,8 @@ static void drm_mode_object_put(struct drm_device *dev,
 	mutex_unlock(&dev->mode_config.idr_mutex);
 }
 
-void *drm_mode_object_find(struct drm_device *dev, uint32_t id, uint32_t type)
+struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
+		uint32_t id, uint32_t type)
 {
 	struct drm_mode_object *obj = NULL;
 
@@ -802,6 +812,36 @@ int drm_mode_create_dithering_property(struct drm_device *dev)
 EXPORT_SYMBOL(drm_mode_create_dithering_property);
 
 /**
+ * drm_mode_create_dirty_property - create dirty property
+ * @dev: DRM device
+ *
+ * Called by a driver the first time it's needed, must be attached to desired
+ * connectors.
+ */
+int drm_mode_create_dirty_info_property(struct drm_device *dev)
+{
+	struct drm_property *dirty_info;
+	int i;
+
+	if (dev->mode_config.dirty_info_property)
+		return 0;
+
+	dirty_info =
+		drm_property_create(dev, DRM_MODE_PROP_ENUM |
+				    DRM_MODE_PROP_IMMUTABLE,
+				    "dirty",
+				    ARRAY_SIZE(drm_dirty_info_enum_list));
+	for (i = 0; i < ARRAY_SIZE(drm_dirty_info_enum_list); i++)
+		drm_property_add_enum(dirty_info, i,
+				      drm_dirty_info_enum_list[i].type,
+				      drm_dirty_info_enum_list[i].name);
+	dev->mode_config.dirty_info_property = dirty_info;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_dirty_info_property);
+
+/**
  * drm_mode_config_init - initialize DRM mode_configuration structure
  * @dev: DRM device
  *
@@ -1753,6 +1793,71 @@ out:
 	return ret;
 }
 
+int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
+			   void *data, struct drm_file *file_priv)
+{
+	struct drm_clip_rect __user *clips_ptr;
+	struct drm_clip_rect *clips = NULL;
+	struct drm_mode_fb_dirty_cmd *r = data;
+	struct drm_mode_object *obj;
+	struct drm_framebuffer *fb;
+	unsigned flags;
+	int num_clips;
+	int ret = 0;
+
+	mutex_lock(&dev->mode_config.mutex);
+	obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
+	if (!obj) {
+		DRM_ERROR("invalid framebuffer id\n");
+		ret = -EINVAL;
+		goto out_err1;
+	}
+	fb = obj_to_fb(obj);
+
+	num_clips = r->num_clips;
+	clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr;
+
+	if (!num_clips != !clips_ptr) {
+		ret = -EINVAL;
+		goto out_err1;
+	}
+
+	flags = DRM_MODE_FB_DIRTY_FLAGS & r->flags;
+
+	/* If userspace annotates copy, clips must come in pairs */
+	if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY && (num_clips % 2)) {
+		ret = -EINVAL;
+		goto out_err1;
+	}
+
+	if (num_clips && clips_ptr) {
+		clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
+		if (!clips) {
+			ret = -ENOMEM;
+			goto out_err1;
+		}
+
+		ret = copy_from_user(clips, clips_ptr,
+				     num_clips * sizeof(*clips));
+		if (ret)
+			goto out_err2;
+	}
+
+	if (fb->funcs->dirty) {
+		ret = fb->funcs->dirty(fb, flags, r->color, clips, num_clips);
+	} else {
+		ret = -ENOSYS;
+		goto out_err2;
+	}
+
+out_err2:
+	kfree(clips);
+out_err1:
+	mutex_unlock(&dev->mode_config.mutex);
+	return ret;
+}
+
+
 /**
  * drm_fb_release - remove and free the FBs on this file
  * @filp: file * from the ioctl
@@ -2478,3 +2583,72 @@ out:
 	mutex_unlock(&dev->mode_config.mutex);
 	return ret;
 }
+
+int drm_mode_page_flip_ioctl(struct drm_device *dev,
+			     void *data, struct drm_file *file_priv)
+{
+	struct drm_mode_crtc_page_flip *page_flip = data;
+	struct drm_mode_object *obj;
+	struct drm_crtc *crtc;
+	struct drm_framebuffer *fb;
+	struct drm_pending_vblank_event *e = NULL;
+	unsigned long flags;
+	int ret = -EINVAL;
+
+	if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS ||
+	    page_flip->reserved != 0)
+		return -EINVAL;
+
+	mutex_lock(&dev->mode_config.mutex);
+	obj = drm_mode_object_find(dev, page_flip->crtc_id, DRM_MODE_OBJECT_CRTC);
+	if (!obj)
+		goto out;
+	crtc = obj_to_crtc(obj);
+
+	if (crtc->funcs->page_flip == NULL)
+		goto out;
+
+	obj = drm_mode_object_find(dev, page_flip->fb_id, DRM_MODE_OBJECT_FB);
+	if (!obj)
+		goto out;
+	fb = obj_to_fb(obj);
+
+	if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
+		ret = -ENOMEM;
+		spin_lock_irqsave(&dev->event_lock, flags);
+		if (file_priv->event_space < sizeof e->event) {
+			spin_unlock_irqrestore(&dev->event_lock, flags);
+			goto out;
+		}
+		file_priv->event_space -= sizeof e->event;
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+
+		e = kzalloc(sizeof *e, GFP_KERNEL);
+		if (e == NULL) {
+			spin_lock_irqsave(&dev->event_lock, flags);
+			file_priv->event_space += sizeof e->event;
+			spin_unlock_irqrestore(&dev->event_lock, flags);
+			goto out;
+		}
+
+		e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
+		e->event.base.length = sizeof e->event;
+		e->event.user_data = page_flip->user_data;
+		e->base.event = &e->event.base;
+		e->base.file_priv = file_priv;
+		e->base.destroy =
+			(void (*) (struct drm_pending_event *)) kfree;
+	}
+
+	ret = crtc->funcs->page_flip(crtc, fb, e);
+	if (ret) {
+		spin_lock_irqsave(&dev->event_lock, flags);
+		file_priv->event_space += sizeof e->event;
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+		kfree(e);
+	}
+
+out:
+	mutex_unlock(&dev->mode_config.mutex);
+	return ret;
+}
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index bbfd110a7168..4231d6db72ec 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -109,7 +109,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
 
 	count = (*connector_funcs->get_modes)(connector);
 	if (!count) {
-		count = drm_add_modes_noedid(connector, 800, 600);
+		count = drm_add_modes_noedid(connector, 1024, 768);
 		if (!count)
 			return 0;
 	}
@@ -1020,6 +1020,9 @@ bool drm_helper_initial_config(struct drm_device *dev)
 {
 	int count = 0;
 
+	/* disable all the possible outputs/crtcs before entering KMS mode */
+	drm_helper_disable_unused_functions(dev);
+
 	drm_fb_helper_parse_command_line(dev);
 
 	count = drm_helper_probe_connector_modes(dev,
diff --git a/drivers/gpu/drm/i915/intel_dp_i2c.c b/drivers/gpu/drm/drm_dp_i2c_helper.c
index a63b6f57d2d4..548887c8506f 100644
--- a/drivers/gpu/drm/i915/intel_dp_i2c.c
+++ b/drivers/gpu/drm/drm_dp_i2c_helper.c
@@ -28,84 +28,20 @@
 #include <linux/errno.h>
 #include <linux/sched.h>
 #include <linux/i2c.h>
-#include "intel_dp.h"
+#include "drm_dp_helper.h"
 #include "drmP.h"
 
 /* Run a single AUX_CH I2C transaction, writing/reading data as necessary */
-
-#define MODE_I2C_START	1
-#define MODE_I2C_WRITE	2
-#define MODE_I2C_READ	4
-#define MODE_I2C_STOP	8
-
 static int
 i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode,
 			    uint8_t write_byte, uint8_t *read_byte)
 {
 	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
-	uint16_t address = algo_data->address;
-	uint8_t msg[5];
-	uint8_t reply[2];
-	int msg_bytes;
-	int reply_bytes;
 	int ret;
 
-	/* Set up the command byte */
-	if (mode & MODE_I2C_READ)
-		msg[0] = AUX_I2C_READ << 4;
-	else
-		msg[0] = AUX_I2C_WRITE << 4;
-
-	if (!(mode & MODE_I2C_STOP))
-		msg[0] |= AUX_I2C_MOT << 4;
-
-	msg[1] = address >> 8;
-	msg[2] = address;
-
-	switch (mode) {
-	case MODE_I2C_WRITE:
-		msg[3] = 0;
-		msg[4] = write_byte;
-		msg_bytes = 5;
-		reply_bytes = 1;
-		break;
-	case MODE_I2C_READ:
-		msg[3] = 0;
-		msg_bytes = 4;
-		reply_bytes = 2;
-		break;
-	default:
-		msg_bytes = 3;
-		reply_bytes = 1;
-		break;
-	}
-
-	for (;;) {
-		ret = (*algo_data->aux_ch)(adapter,
-					   msg, msg_bytes,
-					   reply, reply_bytes);
-		if (ret < 0) {
-			DRM_DEBUG("aux_ch failed %d\n", ret);
-			return ret;
-		}
-		switch (reply[0] & AUX_I2C_REPLY_MASK) {
-		case AUX_I2C_REPLY_ACK:
-			if (mode == MODE_I2C_READ) {
-				*read_byte = reply[1];
-			}
-			return reply_bytes - 1;
-		case AUX_I2C_REPLY_NACK:
-			DRM_DEBUG("aux_ch nack\n");
-			return -EREMOTEIO;
-		case AUX_I2C_REPLY_DEFER:
-			DRM_DEBUG("aux_ch defer\n");
-			udelay(100);
-			break;
-		default:
-			DRM_ERROR("aux_ch invalid reply 0x%02x\n", reply[0]);
-			return -EREMOTEIO;
-		}
-	}
+	ret = (*algo_data->aux_ch)(adapter, mode,
+				   write_byte, read_byte);
+	return ret;
 }
 
 /*
@@ -224,7 +160,7 @@ i2c_algo_dp_aux_xfer(struct i2c_adapter *adapter,
 	if (ret >= 0)
 		ret = num;
 	i2c_algo_dp_aux_stop(adapter, reading);
-	DRM_DEBUG("dp_aux_xfer return %d\n", ret);
+	DRM_DEBUG_KMS("dp_aux_xfer return %d\n", ret);
 	return ret;
 }
 
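
Since the AUX protocol details are gone from the shared helper, each driver now supplies the raw channel transfer through i2c_algo_dp_aux_data. A minimal sketch of that callback's shape, where hw_aux_transaction() is a hypothetical stand-in for the device-specific register-level work done by the real i915/radeon implementations:

/* Sketch: the hook the helper invokes above as
 * (*algo_data->aux_ch)(adapter, mode, write_byte, read_byte);
 * return <0 on error, otherwise the number of bytes transferred. */
static int my_aux_ch(struct i2c_adapter *adapter, int mode,
		     uint8_t write_byte, uint8_t *read_byte)
{
	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;

	/* hw_aux_transaction() is hypothetical; a real driver talks to
	 * its AUX channel registers here. */
	return hw_aux_transaction(algo_data->address, mode,
				  write_byte, read_byte);
}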
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index a75ca63deea6..ff2f1042cb44 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -145,6 +145,8 @@ static struct drm_ioctl_desc drm_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW)
 };
 
 #define DRM_CORE_IOCTL_COUNT	ARRAY_SIZE( drm_ioctls )
@@ -366,6 +368,29 @@ module_init(drm_core_init);
 module_exit(drm_core_exit);
 
 /**
+ * Copy and IOCTL return string to user space
+ */
+static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
+{
+	int len;
+
+	/* don't overflow userbuf */
+	len = strlen(value);
+	if (len > *buf_len)
+		len = *buf_len;
+
+	/* let userspace know exact length of driver value (which could be
+	 * larger than the userspace-supplied buffer) */
+	*buf_len = strlen(value);
+
+	/* finally, try filling in the userbuf */
+	if (len && buf)
+		if (copy_to_user(buf, value, len))
+			return -EFAULT;
+	return 0;
+}
+
+/**
  * Get version information
  *
  * \param inode device inode.
@@ -380,16 +405,21 @@ static int drm_version(struct drm_device *dev, void *data,
 			       struct drm_file *file_priv)
 {
 	struct drm_version *version = data;
-	int len;
+	int err;
 
 	version->version_major = dev->driver->major;
 	version->version_minor = dev->driver->minor;
 	version->version_patchlevel = dev->driver->patchlevel;
-	DRM_COPY(version->name, dev->driver->name);
-	DRM_COPY(version->date, dev->driver->date);
-	DRM_COPY(version->desc, dev->driver->desc);
-
-	return 0;
+	err = drm_copy_field(version->name, &version->name_len,
+			dev->driver->name);
+	if (!err)
+		err = drm_copy_field(version->date, &version->date_len,
+				dev->driver->date);
+	if (!err)
+		err = drm_copy_field(version->desc, &version->desc_len,
+				dev->driver->desc);
+
+	return err;
 }
 
 /**
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index b54ba63d506e..c39b26f1abed 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -123,18 +123,20 @@ static const u8 edid_header[] = {
  */
 static bool edid_is_valid(struct edid *edid)
 {
-	int i;
+	int i, score = 0;
 	u8 csum = 0;
 	u8 *raw_edid = (u8 *)edid;
 
-	if (memcmp(edid->header, edid_header, sizeof(edid_header)))
-		goto bad;
-	if (edid->version != 1) {
-		DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version);
+	for (i = 0; i < sizeof(edid_header); i++)
+		if (raw_edid[i] == edid_header[i])
+			score++;
+
+	if (score == 8) ;
+	else if (score >= 6) {
+		DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
+		memcpy(raw_edid, edid_header, sizeof(edid_header));
+	} else
 		goto bad;
-	}
-	if (edid->revision > 4)
-		DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n");
 
 	for (i = 0; i < EDID_LENGTH; i++)
 		csum += raw_edid[i];
@@ -143,6 +145,14 @@ static bool edid_is_valid(struct edid *edid)
 		goto bad;
 	}
 
+	if (edid->version != 1) {
+		DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version);
+		goto bad;
+	}
+
+	if (edid->revision > 4)
+		DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n");
+
 	return 1;
 
 bad:
@@ -481,16 +491,17 @@ static struct drm_display_mode drm_dmt_modes[] = {
 		   3048, 3536, 0, 1600, 1603, 1609, 1682, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
 };
+static const int drm_num_dmt_modes =
+	sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
 
 static struct drm_display_mode *drm_find_dmt(struct drm_device *dev,
 			int hsize, int vsize, int fresh)
 {
-	int i, count;
+	int i;
 	struct drm_display_mode *ptr, *mode;
 
-	count = sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
 	mode = NULL;
-	for (i = 0; i < count; i++) {
+	for (i = 0; i < drm_num_dmt_modes; i++) {
 		ptr = &drm_dmt_modes[i];
 		if (hsize == ptr->hdisplay &&
 			vsize == ptr->vdisplay &&
@@ -834,8 +845,165 @@ static int add_standard_modes(struct drm_connector *connector, struct edid *edid
 	return modes;
 }
 
+/*
+ * XXX fix this for:
+ * - GTF secondary curve formula
+ * - EDID 1.4 range offsets
+ * - CVT extended bits
+ */
+static bool
+mode_in_range(struct drm_display_mode *mode, struct detailed_timing *timing)
+{
+	struct detailed_data_monitor_range *range;
+	int hsync, vrefresh;
+
+	range = &timing->data.other_data.data.range;
+
+	hsync = drm_mode_hsync(mode);
+	vrefresh = drm_mode_vrefresh(mode);
+
+	if (hsync < range->min_hfreq_khz || hsync > range->max_hfreq_khz)
+		return false;
+
+	if (vrefresh < range->min_vfreq || vrefresh > range->max_vfreq)
+		return false;
+
+	if (range->pixel_clock_mhz && range->pixel_clock_mhz != 0xff) {
+		/* be forgiving since it's in units of 10MHz */
+		int max_clock = range->pixel_clock_mhz * 10 + 9;
+		max_clock *= 1000;
+		if (mode->clock > max_clock)
+			return false;
+	}
+
+	return true;
+}
+
+/*
+ * XXX If drm_dmt_modes ever regrows the CVT-R modes (and it will) this will
+ * need to account for them.
+ */
+static int drm_gtf_modes_for_range(struct drm_connector *connector,
+				   struct detailed_timing *timing)
+{
+	int i, modes = 0;
+	struct drm_display_mode *newmode;
+	struct drm_device *dev = connector->dev;
+
+	for (i = 0; i < drm_num_dmt_modes; i++) {
+		if (mode_in_range(drm_dmt_modes + i, timing)) {
+			newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]);
+			if (newmode) {
+				drm_mode_probed_add(connector, newmode);
+				modes++;
+			}
+		}
+	}
+
+	return modes;
+}
+
+static int drm_cvt_modes(struct drm_connector *connector,
+			 struct detailed_timing *timing)
+{
+	int i, j, modes = 0;
+	struct drm_display_mode *newmode;
+	struct drm_device *dev = connector->dev;
+	struct cvt_timing *cvt;
+	const int rates[] = { 60, 85, 75, 60, 50 };
+
+	for (i = 0; i < 4; i++) {
+		int width, height;
+		cvt = &(timing->data.other_data.data.cvt[i]);
+
+		height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 8) + 1) * 2;
+		switch (cvt->code[1] & 0xc0) {
+		case 0x00:
+			width = height * 4 / 3;
+			break;
+		case 0x40:
+			width = height * 16 / 9;
+			break;
+		case 0x80:
+			width = height * 16 / 10;
+			break;
+		case 0xc0:
+			width = height * 15 / 9;
+			break;
+		}
+
+		for (j = 1; j < 5; j++) {
+			if (cvt->code[2] & (1 << j)) {
+				newmode = drm_cvt_mode(dev, width, height,
+						       rates[j], j == 0,
+						       false, false);
+				if (newmode) {
+					drm_mode_probed_add(connector, newmode);
+					modes++;
+				}
+			}
+		}
+	}
+
+	return modes;
+}
+
+static int add_detailed_modes(struct drm_connector *connector,
+			      struct detailed_timing *timing,
+			      struct edid *edid, u32 quirks, int preferred)
+{
+	int i, modes = 0;
+	struct detailed_non_pixel *data = &timing->data.other_data;
+	int timing_level = standard_timing_level(edid);
+	int gtf = (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF);
+	struct drm_display_mode *newmode;
+	struct drm_device *dev = connector->dev;
+
+	if (timing->pixel_clock) {
+		newmode = drm_mode_detailed(dev, edid, timing, quirks);
+		if (!newmode)
+			return 0;
+
+		if (preferred)
+			newmode->type |= DRM_MODE_TYPE_PREFERRED;
+
+		drm_mode_probed_add(connector, newmode);
+		return 1;
+	}
+
+	/* other timing types */
+	switch (data->type) {
+	case EDID_DETAIL_MONITOR_RANGE:
+		if (gtf)
+			modes += drm_gtf_modes_for_range(connector, timing);
+		break;
+	case EDID_DETAIL_STD_MODES:
+		/* Six modes per detailed section */
+		for (i = 0; i < 6; i++) {
+			struct std_timing *std;
+			struct drm_display_mode *newmode;
+
+			std = &data->data.timings[i];
+			newmode = drm_mode_std(dev, std, edid->revision,
+					       timing_level);
+			if (newmode) {
+				drm_mode_probed_add(connector, newmode);
+				modes++;
+			}
+		}
+		break;
+	case EDID_DETAIL_CVT_3BYTE:
+		modes += drm_cvt_modes(connector, timing);
+		break;
+	default:
+		break;
+	}
+
+	return modes;
+}
+
 /**
- * add_detailed_modes - get detailed mode info from EDID data
+ * add_detailed_info - get detailed mode info from EDID data
  * @connector: attached connector
  * @edid: EDID block to scan
  * @quirks: quirks to apply
@@ -846,67 +1014,24 @@ static int add_standard_modes(struct drm_connector *connector, struct edid *edid
 static int add_detailed_info(struct drm_connector *connector,
 			     struct edid *edid, u32 quirks)
 {
-	struct drm_device *dev = connector->dev;
-	int i, j, modes = 0;
-	int timing_level;
-
-	timing_level = standard_timing_level(edid);
+	int i, modes = 0;
 
 	for (i = 0; i < EDID_DETAILED_TIMINGS; i++) {
 		struct detailed_timing *timing = &edid->detailed_timings[i];
-		struct detailed_non_pixel *data = &timing->data.other_data;
-		struct drm_display_mode *newmode;
-
-		/* X server check is version 1.1 or higher */
-		if (edid->version == 1 && edid->revision >= 1 &&
-		    !timing->pixel_clock) {
-			/* Other timing or info */
-			switch (data->type) {
-			case EDID_DETAIL_MONITOR_SERIAL:
-				break;
-			case EDID_DETAIL_MONITOR_STRING:
-				break;
-			case EDID_DETAIL_MONITOR_RANGE:
-				/* Get monitor range data */
-				break;
-			case EDID_DETAIL_MONITOR_NAME:
-				break;
-			case EDID_DETAIL_MONITOR_CPDATA:
-				break;
-			case EDID_DETAIL_STD_MODES:
-				for (j = 0; j < 6; i++) {
-					struct std_timing *std;
-					struct drm_display_mode *newmode;
-
-					std = &data->data.timings[j];
-					newmode = drm_mode_std(dev, std,
-							edid->revision,
-							timing_level);
-					if (newmode) {
-						drm_mode_probed_add(connector, newmode);
-						modes++;
-					}
-				}
-				break;
-			default:
-				break;
-			}
-		} else {
-			newmode = drm_mode_detailed(dev, edid, timing, quirks);
-			if (!newmode)
-				continue;
+		int preferred = (i == 0) && (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING);
 
-			/* First detailed mode is preferred */
-			if (i == 0 && (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING))
-				newmode->type |= DRM_MODE_TYPE_PREFERRED;
-			drm_mode_probed_add(connector, newmode);
+		/* In 1.0, only timings are allowed */
+		if (!timing->pixel_clock && edid->version == 1 &&
+		    edid->revision == 0)
+			continue;
 
-			modes++;
-		}
+		modes += add_detailed_modes(connector, timing, edid, quirks,
+					    preferred);
 	}
 
 	return modes;
 }
+
 /**
  * add_detailed_mode_eedid - get detailed mode info from addtional timing
  * EDID block
@@ -920,12 +1045,9 @@ static int add_detailed_info(struct drm_connector *connector,
 static int add_detailed_info_eedid(struct drm_connector *connector,
 			     struct edid *edid, u32 quirks)
 {
-	struct drm_device *dev = connector->dev;
-	int i, j, modes = 0;
+	int i, modes = 0;
 	char *edid_ext = NULL;
 	struct detailed_timing *timing;
-	struct detailed_non_pixel *data;
-	struct drm_display_mode *newmode;
 	int edid_ext_num;
 	int start_offset, end_offset;
 	int timing_level;
@@ -976,51 +1098,7 @@ static int add_detailed_info_eedid(struct drm_connector *connector,
 	for (i = start_offset; i < end_offset;
 	     i += sizeof(struct detailed_timing)) {
 		timing = (struct detailed_timing *)(edid_ext + i);
-		data = &timing->data.other_data;
-		/* Detailed mode timing */
-		if (timing->pixel_clock) {
-			newmode = drm_mode_detailed(dev, edid, timing, quirks);
-			if (!newmode)
-				continue;
-
-			drm_mode_probed_add(connector, newmode);
-
-			modes++;
-			continue;
-		}
-
-		/* Other timing or info */
-		switch (data->type) {
-		case EDID_DETAIL_MONITOR_SERIAL:
-			break;
-		case EDID_DETAIL_MONITOR_STRING:
-			break;
-		case EDID_DETAIL_MONITOR_RANGE:
-			/* Get monitor range data */
-			break;
-		case EDID_DETAIL_MONITOR_NAME:
-			break;
-		case EDID_DETAIL_MONITOR_CPDATA:
-			break;
-		case EDID_DETAIL_STD_MODES:
-			/* Five modes per detailed section */
-			for (j = 0; j < 5; i++) {
-				struct std_timing *std;
-				struct drm_display_mode *newmode;
-
-				std = &data->data.timings[j];
-				newmode = drm_mode_std(dev, std,
-						edid->revision,
-						timing_level);
-				if (newmode) {
-					drm_mode_probed_add(connector, newmode);
-					modes++;
-				}
-			}
-			break;
-		default:
-			break;
-		}
+		modes += add_detailed_modes(connector, timing, edid, quirks, 0);
 	}
 
 	return modes;
@@ -1066,19 +1144,19 @@ static int drm_ddc_read_edid(struct drm_connector *connector,
 			    struct i2c_adapter *adapter,
 			    char *buf, int len)
 {
-	int ret;
+	int i;
 
-	ret = drm_do_probe_ddc_edid(adapter, buf, len);
-	if (ret != 0) {
-		goto end;
-	}
-	if (!edid_is_valid((struct edid *)buf)) {
-		dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
-			 drm_get_connector_name(connector));
-		ret = -1;
+	for (i = 0; i < 4; i++) {
+		if (drm_do_probe_ddc_edid(adapter, buf, len))
+			return -1;
+		if (edid_is_valid((struct edid *)buf))
+			return 0;
 	}
-end:
-	return ret;
+
+	/* repeated checksum failures; warn, but carry on */
+	dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
+		 drm_get_connector_name(connector));
+	return -1;
 }
 
 /**
@@ -1296,6 +1374,8 @@ int drm_add_modes_noedid(struct drm_connector *connector,
 		    ptr->vdisplay > vdisplay)
 			continue;
 		}
+		if (drm_mode_vrefresh(ptr) > 61)
+			continue;
 		mode = drm_mode_duplicate(dev, ptr);
 		if (mode) {
 			drm_mode_probed_add(connector, mode);
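
One worked example of the forgiving clock check added in mode_in_range() above: the EDID range descriptor stores its limit in 10 MHz units, so pixel_clock_mhz == 15 yields max_clock = 15 * 10 + 9 = 159 MHz, i.e. any mode->clock up to 159000 kHz is still accepted.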
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 65ef011fa8ba..1b49fa055f4f 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -373,11 +373,9 @@ static void drm_fb_helper_off(struct fb_info *info, int dpms_mode)
 			mutex_unlock(&dev->mode_config.mutex);
 		}
 	}
-	if (dpms_mode == DRM_MODE_DPMS_OFF) {
-		mutex_lock(&dev->mode_config.mutex);
-		crtc_funcs->dpms(crtc, dpms_mode);
-		mutex_unlock(&dev->mode_config.mutex);
-	}
+	mutex_lock(&dev->mode_config.mutex);
+	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+	mutex_unlock(&dev->mode_config.mutex);
 	}
 	}
 }
@@ -385,18 +383,23 @@ static void drm_fb_helper_off(struct fb_info *info, int dpms_mode)
 int drm_fb_helper_blank(int blank, struct fb_info *info)
 {
 	switch (blank) {
+	/* Display: On; HSync: On, VSync: On */
 	case FB_BLANK_UNBLANK:
 		drm_fb_helper_on(info);
 		break;
+	/* Display: Off; HSync: On, VSync: On */
 	case FB_BLANK_NORMAL:
-		drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY);
+		drm_fb_helper_off(info, DRM_MODE_DPMS_ON);
 		break;
+	/* Display: Off; HSync: Off, VSync: On */
 	case FB_BLANK_HSYNC_SUSPEND:
 		drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY);
 		break;
+	/* Display: Off; HSync: On, VSync: Off */
 	case FB_BLANK_VSYNC_SUSPEND:
 		drm_fb_helper_off(info, DRM_MODE_DPMS_SUSPEND);
 		break;
+	/* Display: Off; HSync: Off, VSync: Off */
 	case FB_BLANK_POWERDOWN:
 		drm_fb_helper_off(info, DRM_MODE_DPMS_OFF);
 		break;
@@ -905,8 +908,13 @@ int drm_fb_helper_single_fb_probe(struct drm_device *dev,
 
 	if (new_fb) {
 		info->var.pixclock = 0;
-		if (register_framebuffer(info) < 0)
+		ret = fb_alloc_cmap(&info->cmap, modeset->crtc->gamma_size, 0);
+		if (ret)
+			return ret;
+		if (register_framebuffer(info) < 0) {
+			fb_dealloc_cmap(&info->cmap);
 			return -EINVAL;
+		}
 	} else {
 		drm_fb_helper_set_par(info);
 	}
@@ -936,6 +944,7 @@ void drm_fb_helper_free(struct drm_fb_helper *helper)
 		unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
 	}
 	drm_fb_helper_crtc_free(helper);
+	fb_dealloc_cmap(&helper->fb->fbdev->cmap);
 }
 EXPORT_SYMBOL(drm_fb_helper_free);
 
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 251bc0e3b5ec..08d14df3bb42 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -257,6 +257,9 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
 
 	INIT_LIST_HEAD(&priv->lhead);
 	INIT_LIST_HEAD(&priv->fbs);
+	INIT_LIST_HEAD(&priv->event_list);
+	init_waitqueue_head(&priv->event_wait);
+	priv->event_space = 4096; /* set aside 4k for event buffer */
 
 	if (dev->driver->driver_features & DRIVER_GEM)
 		drm_gem_open(dev, priv);
@@ -297,6 +300,18 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
 				goto out_free;
 			}
 		}
+		mutex_lock(&dev->struct_mutex);
+		if (dev->driver->master_set) {
+			ret = dev->driver->master_set(dev, priv, true);
+			if (ret) {
+				/* drop both references if this fails */
+				drm_master_put(&priv->minor->master);
+				drm_master_put(&priv->master);
+				mutex_unlock(&dev->struct_mutex);
+				goto out_free;
+			}
+		}
+		mutex_unlock(&dev->struct_mutex);
 	} else {
 		/* get a reference to the master */
 		priv->master = drm_master_get(priv->minor->master);
@@ -413,6 +428,30 @@ static void drm_master_release(struct drm_device *dev, struct file *filp)
 	}
 }
 
+static void drm_events_release(struct drm_file *file_priv)
+{
+	struct drm_device *dev = file_priv->minor->dev;
+	struct drm_pending_event *e, *et;
+	struct drm_pending_vblank_event *v, *vt;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+
+	/* Remove pending flips */
+	list_for_each_entry_safe(v, vt, &dev->vblank_event_list, base.link)
+		if (v->base.file_priv == file_priv) {
+			list_del(&v->base.link);
+			drm_vblank_put(dev, v->pipe);
+			v->base.destroy(&v->base);
+		}
+
+	/* Remove unconsumed events */
+	list_for_each_entry_safe(e, et, &file_priv->event_list, link)
+		e->destroy(e);
+
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
 /**
  * Release file.
  *
@@ -451,6 +490,8 @@ int drm_release(struct inode *inode, struct file *filp)
 	if (file_priv->minor->master)
 		drm_master_release(dev, filp);
 
+	drm_events_release(file_priv);
+
 	if (dev->driver->driver_features & DRIVER_GEM)
 		drm_gem_release(dev, file_priv);
 
@@ -504,6 +545,8 @@ int drm_release(struct inode *inode, struct file *filp)
 
 		if (file_priv->minor->master == file_priv->master) {
 			/* drop the reference held my the minor */
+			if (dev->driver->master_drop)
+				dev->driver->master_drop(dev, file_priv, true);
 			drm_master_put(&file_priv->minor->master);
 		}
 	}
@@ -544,9 +587,74 @@ int drm_release(struct inode *inode, struct file *filp)
 }
 EXPORT_SYMBOL(drm_release);
 
-/** No-op. */
+static bool
+drm_dequeue_event(struct drm_file *file_priv,
+		  size_t total, size_t max, struct drm_pending_event **out)
+{
+	struct drm_device *dev = file_priv->minor->dev;
+	struct drm_pending_event *e;
+	unsigned long flags;
+	bool ret = false;
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+
+	*out = NULL;
+	if (list_empty(&file_priv->event_list))
+		goto out;
+	e = list_first_entry(&file_priv->event_list,
+			     struct drm_pending_event, link);
+	if (e->event->length + total > max)
+		goto out;
+
+	file_priv->event_space += e->event->length;
+	list_del(&e->link);
+	*out = e;
+	ret = true;
+
+out:
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+	return ret;
+}
+
+ssize_t drm_read(struct file *filp, char __user *buffer,
+		 size_t count, loff_t *offset)
+{
+	struct drm_file *file_priv = filp->private_data;
+	struct drm_pending_event *e;
+	size_t total;
+	ssize_t ret;
+
+	ret = wait_event_interruptible(file_priv->event_wait,
+				       !list_empty(&file_priv->event_list));
+	if (ret < 0)
+		return ret;
+
+	total = 0;
+	while (drm_dequeue_event(file_priv, total, count, &e)) {
+		if (copy_to_user(buffer + total,
+				 e->event, e->event->length)) {
+			total = -EFAULT;
+			break;
+		}
+
+		total += e->event->length;
+		e->destroy(e);
+	}
+
+	return total;
+}
+EXPORT_SYMBOL(drm_read);
+
 unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait)
 {
-	return 0;
+	struct drm_file *file_priv = filp->private_data;
+	unsigned int mask = 0;
+
+	poll_wait(filp, &file_priv->event_wait, wait);
+
+	if (!list_empty(&file_priv->event_list))
+		mask |= POLLIN | POLLRDNORM;
+
+	return mask;
 }
 EXPORT_SYMBOL(drm_poll);
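
Paired with drm_poll() above, userspace drains these events with an ordinary read() loop; a hedged sketch using the new event ABI (struct drm_event is the fixed header every event begins with; the memcpy()s sidestep alignment concerns):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <drm/drm.h>

/* Sketch: read() returns one or more whole events; walk them by length. */
char buf[4096];
ssize_t len = read(drm_fd, buf, sizeof buf);	/* blocks until an event */
size_t off = 0;
while (len > 0 && off + sizeof(struct drm_event) <= (size_t)len) {
	struct drm_event ev;
	memcpy(&ev, buf + off, sizeof ev);
	if (ev.type == DRM_EVENT_VBLANK || ev.type == DRM_EVENT_FLIP_COMPLETE) {
		struct drm_event_vblank vb;
		memcpy(&vb, buf + off, sizeof vb);
		printf("event %u: seq %u at %u.%06u\n",
		       ev.type, vb.sequence, vb.tv_sec, vb.tv_usec);
	}
	off += ev.length;
}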
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 0a6f0b3bdc78..7998ee66b317 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -429,15 +429,21 @@ int drm_vblank_get(struct drm_device *dev, int crtc)
 
 	spin_lock_irqsave(&dev->vbl_lock, irqflags);
 	/* Going from 0->1 means we have to enable interrupts again */
-	if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1 &&
-	    !dev->vblank_enabled[crtc]) {
-		ret = dev->driver->enable_vblank(dev, crtc);
-		DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret);
-		if (ret)
-			atomic_dec(&dev->vblank_refcount[crtc]);
-		else {
-			dev->vblank_enabled[crtc] = 1;
-			drm_update_vblank_count(dev, crtc);
+	if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) {
+		if (!dev->vblank_enabled[crtc]) {
+			ret = dev->driver->enable_vblank(dev, crtc);
+			DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret);
+			if (ret)
+				atomic_dec(&dev->vblank_refcount[crtc]);
+			else {
+				dev->vblank_enabled[crtc] = 1;
+				drm_update_vblank_count(dev, crtc);
+			}
+		}
+	} else {
+		if (!dev->vblank_enabled[crtc]) {
+			atomic_dec(&dev->vblank_refcount[crtc]);
+			ret = -EINVAL;
 		}
 	}
 	spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
@@ -464,6 +470,18 @@ void drm_vblank_put(struct drm_device *dev, int crtc)
 }
 EXPORT_SYMBOL(drm_vblank_put);
 
+void drm_vblank_off(struct drm_device *dev, int crtc)
+{
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dev->vbl_lock, irqflags);
+	DRM_WAKEUP(&dev->vbl_queue[crtc]);
+	dev->vblank_enabled[crtc] = 0;
+	dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
+	spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+}
+EXPORT_SYMBOL(drm_vblank_off);
+
 /**
  * drm_vblank_pre_modeset - account for vblanks across mode sets
  * @dev: DRM device
@@ -550,6 +568,63 @@ out:
 	return ret;
 }
 
+static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
+				  union drm_wait_vblank *vblwait,
+				  struct drm_file *file_priv)
+{
+	struct drm_pending_vblank_event *e;
+	struct timeval now;
+	unsigned long flags;
+	unsigned int seq;
+
+	e = kzalloc(sizeof *e, GFP_KERNEL);
+	if (e == NULL)
+		return -ENOMEM;
+
+	e->pipe = pipe;
+	e->event.base.type = DRM_EVENT_VBLANK;
+	e->event.base.length = sizeof e->event;
+	e->event.user_data = vblwait->request.signal;
+	e->base.event = &e->event.base;
+	e->base.file_priv = file_priv;
+	e->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
+
+	do_gettimeofday(&now);
+	spin_lock_irqsave(&dev->event_lock, flags);
+
+	if (file_priv->event_space < sizeof e->event) {
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+		kfree(e);
+		return -ENOMEM;
+	}
+
+	file_priv->event_space -= sizeof e->event;
+	seq = drm_vblank_count(dev, pipe);
+	if ((vblwait->request.type & _DRM_VBLANK_NEXTONMISS) &&
+	    (seq - vblwait->request.sequence) <= (1 << 23)) {
+		vblwait->request.sequence = seq + 1;
+		vblwait->reply.sequence = vblwait->request.sequence;
+	}
+
+	DRM_DEBUG("event on vblank count %d, current %d, crtc %d\n",
+		  vblwait->request.sequence, seq, pipe);
+
+	e->event.sequence = vblwait->request.sequence;
+	if ((seq - vblwait->request.sequence) <= (1 << 23)) {
+		e->event.tv_sec = now.tv_sec;
+		e->event.tv_usec = now.tv_usec;
+		drm_vblank_put(dev, e->pipe);
+		list_add_tail(&e->base.link, &e->base.file_priv->event_list);
+		wake_up_interruptible(&e->base.file_priv->event_wait);
+	} else {
+		list_add_tail(&e->base.link, &dev->vblank_event_list);
+	}
+
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+
+	return 0;
+}
+
 /**
  * Wait for VBLANK.
  *
@@ -609,6 +684,9 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
 		goto done;
 	}
 
+	if (flags & _DRM_VBLANK_EVENT)
+		return drm_queue_vblank_event(dev, crtc, vblwait, file_priv);
+
 	if ((flags & _DRM_VBLANK_NEXTONMISS) &&
 	    (seq - vblwait->request.sequence) <= (1<<23)) {
 		vblwait->request.sequence = seq + 1;
@@ -641,6 +719,38 @@ done:
 	return ret;
 }
 
+void drm_handle_vblank_events(struct drm_device *dev, int crtc)
+{
+	struct drm_pending_vblank_event *e, *t;
+	struct timeval now;
+	unsigned long flags;
+	unsigned int seq;
+
+	do_gettimeofday(&now);
+	seq = drm_vblank_count(dev, crtc);
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+
+	list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
+		if (e->pipe != crtc)
+			continue;
+		if ((seq - e->event.sequence) > (1<<23))
+			continue;
+
+		DRM_DEBUG("vblank event on %d, current %d\n",
+			  e->event.sequence, seq);
+
+		e->event.sequence = seq;
+		e->event.tv_sec = now.tv_sec;
+		e->event.tv_usec = now.tv_usec;
+		drm_vblank_put(dev, e->pipe);
+		list_move_tail(&e->base.link, &e->base.file_priv->event_list);
+		wake_up_interruptible(&e->base.file_priv->event_wait);
+	}
+
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
 /**
  * drm_handle_vblank - handle a vblank event
  * @dev: DRM device
@@ -651,7 +761,11 @@ done:
  */
 void drm_handle_vblank(struct drm_device *dev, int crtc)
 {
+	if (!dev->num_crtcs)
+		return;
+
 	atomic_inc(&dev->_vblank_count[crtc]);
 	DRM_WAKEUP(&dev->vbl_queue[crtc]);
+	drm_handle_vblank_events(dev, crtc);
 }
 EXPORT_SYMBOL(drm_handle_vblank);
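
With _DRM_VBLANK_EVENT, a vblank wait no longer has to block inside the ioctl; a hedged sketch of asking for an event one frame out (my_cookie is a hypothetical tag that is delivered back in the event's user_data):

#include <sys/ioctl.h>
#include <drm/drm.h>

/* Sketch: queue a vblank event instead of sleeping in the ioctl. */
union drm_wait_vblank vbl = {0};
vbl.request.type = _DRM_VBLANK_RELATIVE | _DRM_VBLANK_EVENT;
vbl.request.sequence = 1;			/* one vblank from now */
vbl.request.signal = (unsigned long)my_cookie;	/* returned as user_data */
ioctl(drm_fd, DRM_IOCTL_WAIT_VBLANK, &vbl);	/* queues and returns */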
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 97dc5a4f0de4..d7d7eac3ddd2 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -226,6 +226,44 @@ struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
 }
 EXPORT_SYMBOL(drm_mm_get_block_generic);
 
+struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *node,
+						unsigned long size,
+						unsigned alignment,
+						unsigned long start,
+						unsigned long end,
+						int atomic)
+{
+	struct drm_mm_node *align_splitoff = NULL;
+	unsigned tmp = 0;
+	unsigned wasted = 0;
+
+	if (node->start < start)
+		wasted += start - node->start;
+	if (alignment)
+		tmp = ((node->start + wasted) % alignment);
+
+	if (tmp)
+		wasted += alignment - tmp;
+	if (wasted) {
+		align_splitoff = drm_mm_split_at_start(node, wasted, atomic);
+		if (unlikely(align_splitoff == NULL))
+			return NULL;
+	}
+
+	if (node->size == size) {
+		list_del_init(&node->fl_entry);
+		node->free = 0;
+	} else {
+		node = drm_mm_split_at_start(node, size, atomic);
+	}
+
+	if (align_splitoff)
+		drm_mm_put_block(align_splitoff);
+
+	return node;
+}
+EXPORT_SYMBOL(drm_mm_get_block_range_generic);
+
 /*
  * Put a block. Merge with the previous and / or next block if they are free.
  * Otherwise add to the free stack.
@@ -331,6 +369,56 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
 }
 EXPORT_SYMBOL(drm_mm_search_free);
 
+struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
+						unsigned long size,
+						unsigned alignment,
+						unsigned long start,
+						unsigned long end,
+						int best_match)
+{
+	struct list_head *list;
+	const struct list_head *free_stack = &mm->fl_entry;
+	struct drm_mm_node *entry;
+	struct drm_mm_node *best;
+	unsigned long best_size;
+	unsigned wasted;
+
+	best = NULL;
+	best_size = ~0UL;
+
+	list_for_each(list, free_stack) {
+		entry = list_entry(list, struct drm_mm_node, fl_entry);
+		wasted = 0;
+
+		if (entry->size < size)
+			continue;
+
+		if (entry->start > end || (entry->start+entry->size) < start)
+			continue;
+
+		if (entry->start < start)
+			wasted += start - entry->start;
+
+		if (alignment) {
+			register unsigned tmp = (entry->start + wasted) % alignment;
+			if (tmp)
+				wasted += alignment - tmp;
+		}
+
+		if (entry->size >= size + wasted) {
+			if (!best_match)
+				return entry;
+			if (size < best_size) {
+				best = entry;
+				best_size = entry->size;
+			}
+		}
+	}
+
+	return best;
+}
+EXPORT_SYMBOL(drm_mm_search_free_in_range);
+
 int drm_mm_clean(struct drm_mm * mm)
 {
 	struct list_head *head = &mm->ml_entry;
@@ -381,6 +469,26 @@ void drm_mm_takedown(struct drm_mm * mm)
 }
 EXPORT_SYMBOL(drm_mm_takedown);
 
+void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
+{
+	struct drm_mm_node *entry;
+	int total_used = 0, total_free = 0, total = 0;
+
+	list_for_each_entry(entry, &mm->ml_entry, ml_entry) {
+		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8ld: %s\n",
+			prefix, entry->start, entry->start + entry->size,
+			entry->size, entry->free ? "free" : "used");
+		total += entry->size;
+		if (entry->free)
+			total_free += entry->size;
+		else
+			total_used += entry->size;
+	}
+	printk(KERN_DEBUG "%s total: %d, used %d free %d\n", prefix, total,
+		total_used, total_free);
+}
+EXPORT_SYMBOL(drm_mm_debug_table);
+
 #if defined(CONFIG_DEBUG_FS)
 int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
 {
@@ -395,7 +503,7 @@ int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
 	else
 		total_used += entry->size;
 	}
-	seq_printf(m, "total: %d, used %d free %d\n", total, total_free, total_used);
+	seq_printf(m, "total: %d, used %d free %d\n", total, total_used, total_free);
 	return 0;
 }
 EXPORT_SYMBOL(drm_mm_dump_table);
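
The new pair mirrors the existing search/get calls; a hedged driver-side sketch of carving an aligned block out of a restricted window (the mm object and sizes are assumptions for illustration):

/* Sketch: allocate 64 KiB, 4 KiB aligned, constrained to the
 * first 16 MiB of the managed range. */
struct drm_mm_node *node;

node = drm_mm_search_free_in_range(mm, 64 * 1024, 4096,
				   0, 16 * 1024 * 1024, 0);
if (node)
	node = drm_mm_get_block_range_generic(node, 64 * 1024, 4096,
					      0, 16 * 1024 * 1024, 0);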
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 51f677215f1d..6d81a02463a3 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -553,6 +553,32 @@ int drm_mode_height(struct drm_display_mode *mode)
 }
 EXPORT_SYMBOL(drm_mode_height);
 
+/** drm_mode_hsync - get the hsync of a mode
+ * @mode: mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Return @modes's hsync rate in kHz, rounded to the nearest int.
+ */
+int drm_mode_hsync(struct drm_display_mode *mode)
+{
+	unsigned int calc_val;
+
+	if (mode->hsync)
+		return mode->hsync;
+
+	if (mode->htotal < 0)
+		return 0;
+
+	calc_val = (mode->clock * 1000) / mode->htotal; /* hsync in Hz */
+	calc_val += 500;				/* round to 1000Hz */
+	calc_val /= 1000;				/* truncate to kHz */
+
+	return calc_val;
+}
+EXPORT_SYMBOL(drm_mode_hsync);
+
 /**
  * drm_mode_vrefresh - get the vrefresh of a mode
  * @mode: mode
@@ -560,7 +586,7 @@ EXPORT_SYMBOL(drm_mode_height);
  * LOCKING:
  * None.
  *
- * Return @mode's vrefresh rate or calculate it if necessary.
+ * Return @mode's vrefresh rate in Hz or calculate it if necessary.
  *
  * FIXME: why is this needed? shouldn't vrefresh be set already?
  *
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 55bb8a82d612..ad73e141afdb 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -128,6 +128,7 @@ struct drm_master *drm_master_get(struct drm_master *master)
 	kref_get(&master->refcount);
 	return master;
 }
+EXPORT_SYMBOL(drm_master_get);
 
 static void drm_master_destroy(struct kref *kref)
 {
@@ -170,10 +171,13 @@ void drm_master_put(struct drm_master **master)
 	kref_put(&(*master)->refcount, drm_master_destroy);
 	*master = NULL;
 }
+EXPORT_SYMBOL(drm_master_put);
 
 int drm_setmaster_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file_priv)
 {
+	int ret = 0;
+
 	if (file_priv->is_master)
 		return 0;
 
@@ -188,6 +192,13 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data,
 		mutex_lock(&dev->struct_mutex);
 		file_priv->minor->master = drm_master_get(file_priv->master);
 		file_priv->is_master = 1;
+		if (dev->driver->master_set) {
+			ret = dev->driver->master_set(dev, file_priv, false);
+			if (unlikely(ret != 0)) {
+				file_priv->is_master = 0;
+				drm_master_put(&file_priv->minor->master);
+			}
+		}
 		mutex_unlock(&dev->struct_mutex);
 	}
 
@@ -204,6 +215,8 @@ int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
204 return -EINVAL; 215 return -EINVAL;
205 216
206 mutex_lock(&dev->struct_mutex); 217 mutex_lock(&dev->struct_mutex);
218 if (dev->driver->master_drop)
219 dev->driver->master_drop(dev, file_priv, false);
207 drm_master_put(&file_priv->minor->master); 220 drm_master_put(&file_priv->minor->master);
208 file_priv->is_master = 0; 221 file_priv->is_master = 0;
209 mutex_unlock(&dev->struct_mutex); 222 mutex_unlock(&dev->struct_mutex);
@@ -220,9 +233,11 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
220 INIT_LIST_HEAD(&dev->ctxlist); 233 INIT_LIST_HEAD(&dev->ctxlist);
221 INIT_LIST_HEAD(&dev->vmalist); 234 INIT_LIST_HEAD(&dev->vmalist);
222 INIT_LIST_HEAD(&dev->maplist); 235 INIT_LIST_HEAD(&dev->maplist);
236 INIT_LIST_HEAD(&dev->vblank_event_list);
223 237
224 spin_lock_init(&dev->count_lock); 238 spin_lock_init(&dev->count_lock);
225 spin_lock_init(&dev->drw_lock); 239 spin_lock_init(&dev->drw_lock);
240 spin_lock_init(&dev->event_lock);
226 init_timer(&dev->timer); 241 init_timer(&dev->timer);
227 mutex_init(&dev->struct_mutex); 242 mutex_init(&dev->struct_mutex);
228 mutex_init(&dev->ctxlist_mutex); 243 mutex_init(&dev->ctxlist_mutex);
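
The master_set()/master_drop() hooks used above give a driver a veto over
master transitions. A minimal sketch of a driver wiring them up might look
like the following; only the hook signatures come from the patch, while the
mydrv_* names and the suspend check are invented for illustration:

	static int mydrv_master_set(struct drm_device *dev,
				    struct drm_file *file_priv,
				    bool from_open)
	{
		struct mydrv_private *priv = dev->dev_private;

		/* Driver-specific policy: refuse mastership while
		 * suspended (hypothetical example). */
		if (priv->suspended)
			return -EBUSY;
		return 0;
	}

	static void mydrv_master_drop(struct drm_device *dev,
				      struct drm_file *file_priv,
				      bool from_release)
	{
		/* Tear down any per-master state here. */
	}

	static struct drm_driver mydrv_driver = {
		/* ... */
		.master_set = mydrv_master_set,
		.master_drop = mydrv_master_drop,
	};
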
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index fa7b9be096bc..9929f84ec3e1 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -15,7 +15,6 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
	  intel_lvds.o \
	  intel_bios.o \
	  intel_dp.o \
-	  intel_dp_i2c.o \
	  intel_hdmi.o \
	  intel_sdvo.o \
	  intel_modes.o \
@@ -23,6 +22,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
	  intel_fb.o \
	  intel_tv.o \
	  intel_dvo.o \
+	  intel_overlay.o \
	  dvo_ch7xxx.o \
	  dvo_ch7017.o \
	  dvo_ivch.o \
diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
index 621815b531db..1184c14ba87d 100644
--- a/drivers/gpu/drm/i915/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
@@ -249,7 +249,8 @@ static bool ch7017_init(struct intel_dvo_device *dvo,
 	if (val != CH7017_DEVICE_ID_VALUE &&
 	    val != CH7018_DEVICE_ID_VALUE &&
 	    val != CH7019_DEVICE_ID_VALUE) {
-		DRM_DEBUG("ch701x not detected, got %d: from %s Slave %d.\n",
+		DRM_DEBUG_KMS("ch701x not detected, got %d: from %s "
+				"Slave %d.\n",
 			  val, i2cbus->adapter.name,dvo->slave_addr);
 		goto fail;
 	}
@@ -284,7 +285,7 @@ static void ch7017_mode_set(struct intel_dvo_device *dvo,
 	uint8_t horizontal_active_pixel_output, vertical_active_line_output;
 	uint8_t active_input_line_output;
 
-	DRM_DEBUG("Registers before mode setting\n");
+	DRM_DEBUG_KMS("Registers before mode setting\n");
 	ch7017_dump_regs(dvo);
 
 	/* LVDS PLL settings from page 75 of 7017-7017ds.pdf*/
@@ -346,7 +347,7 @@ static void ch7017_mode_set(struct intel_dvo_device *dvo,
 	/* Turn the LVDS back on with new settings. */
 	ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, lvds_power_down);
 
-	DRM_DEBUG("Registers after mode setting\n");
+	DRM_DEBUG_KMS("Registers after mode setting\n");
 	ch7017_dump_regs(dvo);
 }
 
@@ -386,7 +387,7 @@ static void ch7017_dump_regs(struct intel_dvo_device *dvo)
 #define DUMP(reg) \
 do { \
	ch7017_read(dvo, reg, &val); \
-	DRM_DEBUG(#reg ": %02x\n", val); \
+	DRM_DEBUG_KMS(#reg ": %02x\n", val); \
 } while (0)
 
 	DUMP(CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT);
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index a9b896289680..d56ff5cc22b2 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -152,7 +152,7 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
 	};
 
 	if (!ch7xxx->quiet) {
-		DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
+		DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
 			  addr, i2cbus->adapter.name, dvo->slave_addr);
 	}
 	return false;
@@ -179,7 +179,7 @@ static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
 		return true;
 
 	if (!ch7xxx->quiet) {
-		DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
+		DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
 			  addr, i2cbus->adapter.name, dvo->slave_addr);
 	}
 
@@ -207,7 +207,8 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo,
 
 	name = ch7xxx_get_id(vendor);
 	if (!name) {
-		DRM_DEBUG("ch7xxx not detected; got 0x%02x from %s slave %d.\n",
+		DRM_DEBUG_KMS("ch7xxx not detected; got 0x%02x from %s "
+				"slave %d.\n",
 			  vendor, adapter->name, dvo->slave_addr);
 		goto out;
 	}
@@ -217,13 +218,14 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo,
 		goto out;
 
 	if (device != CH7xxx_DID) {
-		DRM_DEBUG("ch7xxx not detected; got 0x%02x from %s slave %d.\n",
+		DRM_DEBUG_KMS("ch7xxx not detected; got 0x%02x from %s "
+				"slave %d.\n",
 			  vendor, adapter->name, dvo->slave_addr);
 		goto out;
 	}
 
 	ch7xxx->quiet = false;
-	DRM_DEBUG("Detected %s chipset, vendor/device ID 0x%02x/0x%02x\n",
+	DRM_DEBUG_KMS("Detected %s chipset, vendor/device ID 0x%02x/0x%02x\n",
 		  name, vendor, device);
 	return true;
 out:
@@ -315,8 +317,8 @@ static void ch7xxx_dump_regs(struct intel_dvo_device *dvo)
 
 	for (i = 0; i < CH7xxx_NUM_REGS; i++) {
 		if ((i % 8) == 0 )
-			DRM_DEBUG("\n %02X: ", i);
-		DRM_DEBUG("%02X ", ch7xxx->mode_reg.regs[i]);
+			DRM_LOG_KMS("\n %02X: ", i);
+		DRM_LOG_KMS("%02X ", ch7xxx->mode_reg.regs[i]);
 	}
 }
 
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
index aa176f9921fe..24169e528f0f 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -202,7 +202,8 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
 	};
 
 	if (!priv->quiet) {
-		DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
+		DRM_DEBUG_KMS("Unable to read register 0x%02x from "
+				"%s:%02x.\n",
 			  addr, i2cbus->adapter.name, dvo->slave_addr);
 	}
 	return false;
@@ -230,7 +231,7 @@ static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
 		return true;
 
 	if (!priv->quiet) {
-		DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
+		DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
 			  addr, i2cbus->adapter.name, dvo->slave_addr);
 	}
 
@@ -261,7 +262,7 @@ static bool ivch_init(struct intel_dvo_device *dvo,
 	 * the address it's responding on.
 	 */
 	if ((temp & VR00_BASE_ADDRESS_MASK) != dvo->slave_addr) {
-		DRM_DEBUG("ivch detect failed due to address mismatch "
+		DRM_DEBUG_KMS("ivch detect failed due to address mismatch "
 			  "(%d vs %d)\n",
 			  (temp & VR00_BASE_ADDRESS_MASK), dvo->slave_addr);
 		goto out;
@@ -367,41 +368,41 @@ static void ivch_dump_regs(struct intel_dvo_device *dvo)
 	uint16_t val;
 
 	ivch_read(dvo, VR00, &val);
-	DRM_DEBUG("VR00: 0x%04x\n", val);
+	DRM_LOG_KMS("VR00: 0x%04x\n", val);
 	ivch_read(dvo, VR01, &val);
-	DRM_DEBUG("VR01: 0x%04x\n", val);
+	DRM_LOG_KMS("VR01: 0x%04x\n", val);
 	ivch_read(dvo, VR30, &val);
-	DRM_DEBUG("VR30: 0x%04x\n", val);
+	DRM_LOG_KMS("VR30: 0x%04x\n", val);
 	ivch_read(dvo, VR40, &val);
-	DRM_DEBUG("VR40: 0x%04x\n", val);
+	DRM_LOG_KMS("VR40: 0x%04x\n", val);
 
 	/* GPIO registers */
 	ivch_read(dvo, VR80, &val);
-	DRM_DEBUG("VR80: 0x%04x\n", val);
+	DRM_LOG_KMS("VR80: 0x%04x\n", val);
 	ivch_read(dvo, VR81, &val);
-	DRM_DEBUG("VR81: 0x%04x\n", val);
+	DRM_LOG_KMS("VR81: 0x%04x\n", val);
 	ivch_read(dvo, VR82, &val);
-	DRM_DEBUG("VR82: 0x%04x\n", val);
+	DRM_LOG_KMS("VR82: 0x%04x\n", val);
 	ivch_read(dvo, VR83, &val);
-	DRM_DEBUG("VR83: 0x%04x\n", val);
+	DRM_LOG_KMS("VR83: 0x%04x\n", val);
 	ivch_read(dvo, VR84, &val);
-	DRM_DEBUG("VR84: 0x%04x\n", val);
+	DRM_LOG_KMS("VR84: 0x%04x\n", val);
 	ivch_read(dvo, VR85, &val);
-	DRM_DEBUG("VR85: 0x%04x\n", val);
+	DRM_LOG_KMS("VR85: 0x%04x\n", val);
 	ivch_read(dvo, VR86, &val);
-	DRM_DEBUG("VR86: 0x%04x\n", val);
+	DRM_LOG_KMS("VR86: 0x%04x\n", val);
 	ivch_read(dvo, VR87, &val);
-	DRM_DEBUG("VR87: 0x%04x\n", val);
+	DRM_LOG_KMS("VR87: 0x%04x\n", val);
 	ivch_read(dvo, VR88, &val);
-	DRM_DEBUG("VR88: 0x%04x\n", val);
+	DRM_LOG_KMS("VR88: 0x%04x\n", val);
 
 	/* Scratch register 0 - AIM Panel type */
 	ivch_read(dvo, VR8E, &val);
-	DRM_DEBUG("VR8E: 0x%04x\n", val);
+	DRM_LOG_KMS("VR8E: 0x%04x\n", val);
 
 	/* Scratch register 1 - Status register */
 	ivch_read(dvo, VR8F, &val);
-	DRM_DEBUG("VR8F: 0x%04x\n", val);
+	DRM_LOG_KMS("VR8F: 0x%04x\n", val);
 }
 
 static void ivch_save(struct intel_dvo_device *dvo)
diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
index e1c1f7341e5c..0001c13f0a80 100644
--- a/drivers/gpu/drm/i915/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/dvo_sil164.c
@@ -105,7 +105,7 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
 	};
 
 	if (!sil->quiet) {
-		DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
+		DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
 			  addr, i2cbus->adapter.name, dvo->slave_addr);
 	}
 	return false;
@@ -131,7 +131,7 @@ static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
 		return true;
 
 	if (!sil->quiet) {
-		DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
+		DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
 			  addr, i2cbus->adapter.name, dvo->slave_addr);
 	}
 
@@ -158,7 +158,7 @@ static bool sil164_init(struct intel_dvo_device *dvo,
 		goto out;
 
 	if (ch != (SIL164_VID & 0xff)) {
-		DRM_DEBUG("sil164 not detected got %d: from %s Slave %d.\n",
+		DRM_DEBUG_KMS("sil164 not detected got %d: from %s Slave %d.\n",
 			  ch, adapter->name, dvo->slave_addr);
 		goto out;
 	}
@@ -167,13 +167,13 @@ static bool sil164_init(struct intel_dvo_device *dvo,
 		goto out;
 
 	if (ch != (SIL164_DID & 0xff)) {
-		DRM_DEBUG("sil164 not detected got %d: from %s Slave %d.\n",
+		DRM_DEBUG_KMS("sil164 not detected got %d: from %s Slave %d.\n",
 			  ch, adapter->name, dvo->slave_addr);
 		goto out;
 	}
 	sil->quiet = false;
 
-	DRM_DEBUG("init sil164 dvo controller successfully!\n");
+	DRM_DEBUG_KMS("init sil164 dvo controller successfully!\n");
 	return true;
 
 out:
@@ -241,15 +241,15 @@ static void sil164_dump_regs(struct intel_dvo_device *dvo)
 	uint8_t val;
 
 	sil164_readb(dvo, SIL164_FREQ_LO, &val);
-	DRM_DEBUG("SIL164_FREQ_LO: 0x%02x\n", val);
+	DRM_LOG_KMS("SIL164_FREQ_LO: 0x%02x\n", val);
 	sil164_readb(dvo, SIL164_FREQ_HI, &val);
-	DRM_DEBUG("SIL164_FREQ_HI: 0x%02x\n", val);
+	DRM_LOG_KMS("SIL164_FREQ_HI: 0x%02x\n", val);
 	sil164_readb(dvo, SIL164_REG8, &val);
-	DRM_DEBUG("SIL164_REG8: 0x%02x\n", val);
+	DRM_LOG_KMS("SIL164_REG8: 0x%02x\n", val);
 	sil164_readb(dvo, SIL164_REG9, &val);
-	DRM_DEBUG("SIL164_REG9: 0x%02x\n", val);
+	DRM_LOG_KMS("SIL164_REG9: 0x%02x\n", val);
 	sil164_readb(dvo, SIL164_REGC, &val);
-	DRM_DEBUG("SIL164_REGC: 0x%02x\n", val);
+	DRM_LOG_KMS("SIL164_REGC: 0x%02x\n", val);
 }
 
 static void sil164_save(struct intel_dvo_device *dvo)
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
index 9ecc907384ec..c7c391bc116a 100644
--- a/drivers/gpu/drm/i915/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
@@ -130,7 +130,7 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
 	};
 
 	if (!tfp->quiet) {
-		DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
+		DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
 			  addr, i2cbus->adapter.name, dvo->slave_addr);
 	}
 	return false;
@@ -156,7 +156,7 @@ static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
 		return true;
 
 	if (!tfp->quiet) {
-		DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
+		DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
 			  addr, i2cbus->adapter.name, dvo->slave_addr);
 	}
 
@@ -191,13 +191,15 @@ static bool tfp410_init(struct intel_dvo_device *dvo,
 	tfp->quiet = true;
 
 	if ((id = tfp410_getid(dvo, TFP410_VID_LO)) != TFP410_VID) {
-		DRM_DEBUG("tfp410 not detected got VID %X: from %s Slave %d.\n",
+		DRM_DEBUG_KMS("tfp410 not detected got VID %X: from %s "
+				"Slave %d.\n",
 			  id, adapter->name, dvo->slave_addr);
 		goto out;
 	}
 
 	if ((id = tfp410_getid(dvo, TFP410_DID_LO)) != TFP410_DID) {
-		DRM_DEBUG("tfp410 not detected got DID %X: from %s Slave %d.\n",
+		DRM_DEBUG_KMS("tfp410 not detected got DID %X: from %s "
+				"Slave %d.\n",
 			  id, adapter->name, dvo->slave_addr);
 		goto out;
 	}
@@ -262,33 +264,33 @@ static void tfp410_dump_regs(struct intel_dvo_device *dvo)
 	uint8_t val, val2;
 
 	tfp410_readb(dvo, TFP410_REV, &val);
-	DRM_DEBUG("TFP410_REV: 0x%02X\n", val);
+	DRM_LOG_KMS("TFP410_REV: 0x%02X\n", val);
 	tfp410_readb(dvo, TFP410_CTL_1, &val);
-	DRM_DEBUG("TFP410_CTL1: 0x%02X\n", val);
+	DRM_LOG_KMS("TFP410_CTL1: 0x%02X\n", val);
 	tfp410_readb(dvo, TFP410_CTL_2, &val);
-	DRM_DEBUG("TFP410_CTL2: 0x%02X\n", val);
+	DRM_LOG_KMS("TFP410_CTL2: 0x%02X\n", val);
 	tfp410_readb(dvo, TFP410_CTL_3, &val);
-	DRM_DEBUG("TFP410_CTL3: 0x%02X\n", val);
+	DRM_LOG_KMS("TFP410_CTL3: 0x%02X\n", val);
 	tfp410_readb(dvo, TFP410_USERCFG, &val);
-	DRM_DEBUG("TFP410_USERCFG: 0x%02X\n", val);
+	DRM_LOG_KMS("TFP410_USERCFG: 0x%02X\n", val);
 	tfp410_readb(dvo, TFP410_DE_DLY, &val);
-	DRM_DEBUG("TFP410_DE_DLY: 0x%02X\n", val);
+	DRM_LOG_KMS("TFP410_DE_DLY: 0x%02X\n", val);
 	tfp410_readb(dvo, TFP410_DE_CTL, &val);
-	DRM_DEBUG("TFP410_DE_CTL: 0x%02X\n", val);
+	DRM_LOG_KMS("TFP410_DE_CTL: 0x%02X\n", val);
 	tfp410_readb(dvo, TFP410_DE_TOP, &val);
-	DRM_DEBUG("TFP410_DE_TOP: 0x%02X\n", val);
+	DRM_LOG_KMS("TFP410_DE_TOP: 0x%02X\n", val);
 	tfp410_readb(dvo, TFP410_DE_CNT_LO, &val);
 	tfp410_readb(dvo, TFP410_DE_CNT_HI, &val2);
-	DRM_DEBUG("TFP410_DE_CNT: 0x%02X%02X\n", val2, val);
+	DRM_LOG_KMS("TFP410_DE_CNT: 0x%02X%02X\n", val2, val);
 	tfp410_readb(dvo, TFP410_DE_LIN_LO, &val);
 	tfp410_readb(dvo, TFP410_DE_LIN_HI, &val2);
-	DRM_DEBUG("TFP410_DE_LIN: 0x%02X%02X\n", val2, val);
+	DRM_LOG_KMS("TFP410_DE_LIN: 0x%02X%02X\n", val2, val);
 	tfp410_readb(dvo, TFP410_H_RES_LO, &val);
 	tfp410_readb(dvo, TFP410_H_RES_HI, &val2);
-	DRM_DEBUG("TFP410_H_RES: 0x%02X%02X\n", val2, val);
+	DRM_LOG_KMS("TFP410_H_RES: 0x%02X%02X\n", val2, val);
 	tfp410_readb(dvo, TFP410_V_RES_LO, &val);
 	tfp410_readb(dvo, TFP410_V_RES_HI, &val2);
-	DRM_DEBUG("TFP410_V_RES: 0x%02X%02X\n", val2, val);
+	DRM_LOG_KMS("TFP410_V_RES: 0x%02X%02X\n", val2, val);
 }
 
 static void tfp410_save(struct intel_dvo_device *dvo)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 26bf0552b3cb..18476bf0b580 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -27,6 +27,7 @@
  */
 
 #include <linux/seq_file.h>
+#include <linux/debugfs.h>
 #include "drmP.h"
 #include "drm.h"
 #include "i915_drm.h"
@@ -96,13 +97,14 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
 	{
 		struct drm_gem_object *obj = obj_priv->obj;
 
-		seq_printf(m, "    %p: %s %8zd %08x %08x %d %s",
+		seq_printf(m, "    %p: %s %8zd %08x %08x %d%s%s",
			   obj,
			   get_pin_flag(obj_priv),
			   obj->size,
			   obj->read_domains, obj->write_domain,
			   obj_priv->last_rendering_seqno,
-			   obj_priv->dirty ? "dirty" : "");
+			   obj_priv->dirty ? " dirty" : "",
+			   obj_priv->madv == I915_MADV_DONTNEED ? " purgeable" : "");
 
 		if (obj->name)
			seq_printf(m, " (name: %d)", obj->name);
@@ -160,7 +162,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
-	if (!IS_IGDNG(dev)) {
+	if (!IS_IRONLAKE(dev)) {
		seq_printf(m, "Interrupt enable:    %08x\n",
			   I915_READ(IER));
		seq_printf(m, "Interrupt identity:  %08x\n",
@@ -412,6 +414,109 @@ static int i915_registers_info(struct seq_file *m, void *data) {
 	return 0;
 }
 
+static int
+i915_wedged_open(struct inode *inode,
+		 struct file *filp)
+{
+	filp->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t
+i915_wedged_read(struct file *filp,
+		 char __user *ubuf,
+		 size_t max,
+		 loff_t *ppos)
+{
+	struct drm_device *dev = filp->private_data;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	char buf[80];
+	int len;
+
+	len = snprintf(buf, sizeof (buf),
+		       "wedged : %d\n",
+		       atomic_read(&dev_priv->mm.wedged));
+
+	return simple_read_from_buffer(ubuf, max, ppos, buf, len);
+}
+
+static ssize_t
+i915_wedged_write(struct file *filp,
+		  const char __user *ubuf,
+		  size_t cnt,
+		  loff_t *ppos)
+{
+	struct drm_device *dev = filp->private_data;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	char buf[20];
+	int val = 1;
+
+	if (cnt > 0) {
+		if (cnt > sizeof (buf) - 1)
+			return -EINVAL;
+
+		if (copy_from_user(buf, ubuf, cnt))
+			return -EFAULT;
+		buf[cnt] = 0;
+
+		val = simple_strtoul(buf, NULL, 0);
+	}
+
+	DRM_INFO("Manually setting wedged to %d\n", val);
+
+	atomic_set(&dev_priv->mm.wedged, val);
+	if (val) {
+		DRM_WAKEUP(&dev_priv->irq_queue);
+		queue_work(dev_priv->wq, &dev_priv->error_work);
+	}
+
+	return cnt;
+}
+
+static const struct file_operations i915_wedged_fops = {
+	.owner = THIS_MODULE,
+	.open = i915_wedged_open,
+	.read = i915_wedged_read,
+	.write = i915_wedged_write,
+};
+
+/* As the drm_debugfs_init() routines are called before dev->dev_private is
+ * allocated we need to hook into the minor for release. */
+static int
+drm_add_fake_info_node(struct drm_minor *minor,
+		       struct dentry *ent,
+		       const void *key)
+{
+	struct drm_info_node *node;
+
+	node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
+	if (node == NULL) {
+		debugfs_remove(ent);
+		return -ENOMEM;
+	}
+
+	node->minor = minor;
+	node->dent = ent;
+	node->info_ent = (void *) key;
+	list_add(&node->list, &minor->debugfs_nodes.list);
+
+	return 0;
+}
+
+static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
+{
+	struct drm_device *dev = minor->dev;
+	struct dentry *ent;
+
+	ent = debugfs_create_file("i915_wedged",
+				  S_IRUGO | S_IWUSR,
+				  root, dev,
+				  &i915_wedged_fops);
+	if (IS_ERR(ent))
+		return PTR_ERR(ent);
+
+	return drm_add_fake_info_node(minor, ent, &i915_wedged_fops);
+}
 
 static struct drm_info_list i915_debugfs_list[] = {
 	{"i915_regs", i915_registers_info, 0},
@@ -432,6 +537,12 @@ static struct drm_info_list i915_debugfs_list[] = {
 
 int i915_debugfs_init(struct drm_minor *minor)
 {
+	int ret;
+
+	ret = i915_wedged_create(minor->debugfs_root, minor);
+	if (ret)
+		return ret;
+
 	return drm_debugfs_create_files(i915_debugfs_list,
					I915_DEBUGFS_ENTRIES,
					minor->debugfs_root, minor);
@@ -441,7 +552,8 @@ void i915_debugfs_cleanup(struct drm_minor *minor)
 {
 	drm_debugfs_remove_files(i915_debugfs_list,
				 I915_DEBUGFS_ENTRIES, minor);
+	drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops,
+				 1, minor);
 }
 
 #endif /* CONFIG_DEBUG_FS */
-
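
The i915_wedged file follows the stock debugfs pattern: the device pointer is
stashed in file->private_data at open time and reads are serviced with
simple_read_from_buffer(). A stripped-down, self-contained sketch of the same
pattern (the demo_* names are made up; this is not the driver's code):

	#include <linux/atomic.h>
	#include <linux/debugfs.h>
	#include <linux/fs.h>
	#include <linux/module.h>

	static atomic_t demo_flag = ATOMIC_INIT(0);
	static struct dentry *demo_dent;

	static ssize_t demo_read(struct file *filp, char __user *ubuf,
				 size_t max, loff_t *ppos)
	{
		char buf[32];
		int len = snprintf(buf, sizeof(buf), "flag: %d\n",
				   atomic_read(&demo_flag));

		/* Copies at most 'max' bytes of buf to userspace,
		 * honouring and advancing *ppos. */
		return simple_read_from_buffer(ubuf, max, ppos, buf, len);
	}

	static const struct file_operations demo_fops = {
		.owner = THIS_MODULE,
		.read  = demo_read,
	};

	static int __init demo_init(void)
	{
		demo_dent = debugfs_create_file("demo_flag", 0444,
						NULL, NULL, &demo_fops);
		if (IS_ERR(demo_dent))
			return PTR_ERR(demo_dent);
		return 0;
	}

	static void __exit demo_exit(void)
	{
		debugfs_remove(demo_dent);
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");
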
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index e5b138be45fa..701bfeac7f57 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -807,6 +807,12 @@ static int i915_getparam(struct drm_device *dev, void *data,
 	case I915_PARAM_NUM_FENCES_AVAIL:
		value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
		break;
+	case I915_PARAM_HAS_OVERLAY:
+		value = dev_priv->overlay ? 1 : 0;
+		break;
+	case I915_PARAM_HAS_PAGEFLIPPING:
+		value = 1;
+		break;
 	default:
		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
				 param->param);
@@ -962,7 +968,7 @@ static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size,
	 * Some of the preallocated space is taken by the GTT
	 * and popup. GTT is 1K per MB of aperture size, and popup is 4K.
	 */
-	if (IS_G4X(dev) || IS_IGD(dev) || IS_IGDNG(dev))
+	if (IS_G4X(dev) || IS_PINEVIEW(dev) || IS_IRONLAKE(dev))
		overhead = 4096;
	else
		overhead = (*aperture_size / 1024) + 4096;
@@ -1048,7 +1054,7 @@ static unsigned long i915_gtt_to_phys(struct drm_device *dev,
	int gtt_offset, gtt_size;
 
	if (IS_I965G(dev)) {
-		if (IS_G4X(dev) || IS_IGDNG(dev)) {
+		if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
			gtt_offset = 2*1024*1024;
			gtt_size = 2*1024*1024;
		} else {
@@ -1070,7 +1076,7 @@ static unsigned long i915_gtt_to_phys(struct drm_device *dev,
 
	entry = *(volatile u32 *)(gtt + (gtt_addr / 1024));
 
-	DRM_DEBUG("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry);
+	DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry);
 
	/* Mask out these reserved bits on this hardware. */
	if (!IS_I9XX(dev) || IS_I915G(dev) || IS_I915GM(dev) ||
@@ -1096,7 +1102,7 @@ static unsigned long i915_gtt_to_phys(struct drm_device *dev,
	phys =(entry & PTE_ADDRESS_MASK) |
		((uint64_t)(entry & PTE_ADDRESS_MASK_HIGH) << (32 - 4));
 
-	DRM_DEBUG("GTT addr: 0x%08lx, phys addr: 0x%08lx\n", gtt_addr, phys);
+	DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, phys addr: 0x%08lx\n", gtt_addr, phys);
 
	return phys;
 }
@@ -1306,7 +1312,7 @@ static void i915_get_mem_freq(struct drm_device *dev)
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 tmp;
 
-	if (!IS_IGD(dev))
+	if (!IS_PINEVIEW(dev))
		return;
 
	tmp = I915_READ(CLKCFG);
@@ -1413,7 +1419,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
	if (ret)
		goto out_iomapfree;
 
-	dev_priv->wq = create_workqueue("i915");
+	dev_priv->wq = create_singlethread_workqueue("i915");
	if (dev_priv->wq == NULL) {
		DRM_ERROR("Failed to create our workqueue.\n");
		ret = -ENOMEM;
@@ -1434,7 +1440,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
	dev->driver->get_vblank_counter = i915_get_vblank_counter;
	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
-	if (IS_G4X(dev) || IS_IGDNG(dev)) {
+	if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	}
@@ -1489,9 +1495,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
	}
 
	/* Must be done after probing outputs */
-	/* FIXME: verify on IGDNG */
-	if (!IS_IGDNG(dev))
-		intel_opregion_init(dev, 0);
+	intel_opregion_init(dev, 0);
 
	setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
		    (unsigned long) dev);
@@ -1525,6 +1529,15 @@ int i915_driver_unload(struct drm_device *dev)
	}
 
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		/*
+		 * free the memory space allocated for the child device
+		 * config parsed from VBT
+		 */
+		if (dev_priv->child_dev && dev_priv->child_dev_num) {
+			kfree(dev_priv->child_dev);
+			dev_priv->child_dev = NULL;
+			dev_priv->child_dev_num = 0;
+		}
		drm_irq_uninstall(dev);
		vga_client_register(dev->pdev, NULL, NULL, NULL);
	}
@@ -1535,8 +1548,7 @@ int i915_driver_unload(struct drm_device *dev)
	if (dev_priv->regs != NULL)
		iounmap(dev_priv->regs);
 
-	if (!IS_IGDNG(dev))
-		intel_opregion_free(dev, 0);
+	intel_opregion_free(dev, 0);
 
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_modeset_cleanup(dev);
@@ -1548,6 +1560,8 @@ int i915_driver_unload(struct drm_device *dev)
		mutex_unlock(&dev->struct_mutex);
		drm_mm_takedown(&dev_priv->vram);
		i915_gem_lastclose(dev);
+
+		intel_cleanup_overlay(dev);
	}
 
	pci_dev_put(dev_priv->bridge_dev);
@@ -1656,6 +1670,8 @@ struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, 0),
+	DRM_IOCTL_DEF(DRM_I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW),
+	DRM_IOCTL_DEF(DRM_I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW),
 };
 
 int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
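
To make the "GTT is 1K per MB of aperture size, and popup is 4K" comment in
i915_probe_agp() concrete, the overhead works out like this (a
back-of-the-envelope sketch, sizes in bytes, numbers purely illustrative):

	unsigned long aperture = 256UL * 1024 * 1024;	/* 256MB aperture */
	unsigned long overhead;

	/* 1KB of GTT per 1MB of aperture, plus the 4KB popup: */
	overhead = aperture / 1024 + 4096;	/* 262144 + 4096 = 266240 */

	/* ...except on G4X/Pineview/Ironlake, where the GTT does not
	 * live in stolen memory and only the popup is charged: */
	overhead = 4096;
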
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 7f436ec075f6..2fa217862058 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -333,6 +333,7 @@ static struct drm_driver driver = {
		 .mmap = drm_gem_mmap,
		 .poll = drm_poll,
		 .fasync = drm_fasync,
+		 .read = drm_read,
 #ifdef CONFIG_COMPAT
		 .compat_ioctl = i915_compat_ioctl,
 #endif
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a725f6591192..fbecac72f5bb 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -170,6 +170,8 @@ struct drm_i915_display_funcs {
	/* clock gating init */
 };
 
+struct intel_overlay;
+
 typedef struct drm_i915_private {
	struct drm_device *dev;
 
@@ -187,6 +189,7 @@ typedef struct drm_i915_private {
	unsigned int status_gfx_addr;
	drm_local_map_t hws_map;
	struct drm_gem_object *hws_obj;
+	struct drm_gem_object *pwrctx;
 
	struct resource mch_res;
 
@@ -206,11 +209,13 @@ typedef struct drm_i915_private {
	/** Cached value of IMR to avoid reads in updating the bitfield */
	u32 irq_mask_reg;
	u32 pipestat[2];
-	/** splitted irq regs for graphics and display engine on IGDNG,
+	/** splitted irq regs for graphics and display engine on Ironlake,
	    irq_mask_reg is still used for display irq. */
	u32 gt_irq_mask_reg;
	u32 gt_irq_enable_reg;
	u32 de_irq_enable_reg;
+	u32 pch_irq_mask_reg;
+	u32 pch_irq_enable_reg;
 
	u32 hotplug_supported_mask;
	struct work_struct hotplug_work;
@@ -240,6 +245,9 @@ typedef struct drm_i915_private {
 
	struct intel_opregion opregion;
 
+	/* overlay */
+	struct intel_overlay *overlay;
+
	/* LVDS info */
	int backlight_duty_cycle;  /* restore backlight to this value */
	bool panel_wants_dither;
@@ -258,7 +266,7 @@ typedef struct drm_i915_private {
 
	struct notifier_block lid_notifier;
 
-	int crt_ddc_bus; /* -1 = unknown, else GPIO to use for CRT DDC */
+	int crt_ddc_bus; /* 0 = unknown, else GPIO to use for CRT DDC */
	struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */
	int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
	int num_fence_regs; /* 8 on pre-965, 16 otherwise */
@@ -280,6 +288,7 @@ typedef struct drm_i915_private {
	u32 saveDSPBCNTR;
	u32 saveDSPARB;
	u32 saveRENDERSTANDBY;
+	u32 savePWRCTXA;
	u32 saveHWS;
	u32 savePIPEACONF;
	u32 savePIPEBCONF;
@@ -374,8 +383,6 @@ typedef struct drm_i915_private {
	u32 saveFDI_RXA_IMR;
	u32 saveFDI_RXB_IMR;
	u32 saveCACHE_MODE_0;
-	u32 saveD_STATE;
-	u32 saveDSPCLK_GATE_D;
	u32 saveMI_ARB_STATE;
	u32 saveSWF0[16];
	u32 saveSWF1[16];
@@ -539,13 +546,21 @@ typedef struct drm_i915_private {
	/* indicate whether the LVDS_BORDER should be enabled or not */
	unsigned int lvds_border_bits;
 
+	struct drm_crtc *plane_to_crtc_mapping[2];
+	struct drm_crtc *pipe_to_crtc_mapping[2];
+	wait_queue_head_t pending_flip_queue;
+
	/* Reclocking support */
	bool render_reclock_avail;
	bool lvds_downclock_avail;
+	/* indicates the reduced downclock for LVDS*/
+	int lvds_downclock;
	struct work_struct idle_work;
	struct timer_list idle_timer;
	bool busy;
	u16 orig_clock;
+	int child_dev_num;
+	struct child_device_config *child_dev;
 } drm_i915_private_t;
 
 /** driver private structure attached to each drm_gem_object */
@@ -638,6 +653,13 @@ struct drm_i915_gem_object {
	 * Advice: are the backing pages purgeable?
	 */
	int madv;
+
+	/**
+	 * Number of crtcs where this object is currently the fb, but
+	 * will be page flipped away on the next vblank.  When it
+	 * reaches 0, dev_priv->pending_flip_queue will be woken up.
+	 */
+	atomic_t pending_flip;
 };
 
 /**
@@ -738,6 +760,8 @@ i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
 void
 i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
 
+void intel_enable_asle (struct drm_device *dev);
+
 
 /* i915_mem.c */
 extern int i915_mem_alloc(struct drm_device *dev, void *data,
@@ -813,6 +837,9 @@ void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 int i915_gem_do_init(struct drm_device *dev, unsigned long start,
		     unsigned long end);
 int i915_gem_idle(struct drm_device *dev);
+uint32_t i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
+			  uint32_t flush_domains);
+int i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
				      int write);
@@ -824,6 +851,7 @@ void i915_gem_free_all_phys_object(struct drm_device *dev);
 int i915_gem_object_get_pages(struct drm_gem_object *obj);
 void i915_gem_object_put_pages(struct drm_gem_object *obj);
 void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
+void i915_gem_object_flush_write_domain(struct drm_gem_object *obj);
 
 void i915_gem_shrinker_init(void);
 void i915_gem_shrinker_exit(void);
@@ -863,11 +891,13 @@ extern int i915_restore_state(struct drm_device *dev);
 extern int intel_opregion_init(struct drm_device *dev, int resume);
 extern void intel_opregion_free(struct drm_device *dev, int suspend);
 extern void opregion_asle_intr(struct drm_device *dev);
+extern void ironlake_opregion_gse_intr(struct drm_device *dev);
 extern void opregion_enable_asle(struct drm_device *dev);
 #else
 static inline int intel_opregion_init(struct drm_device *dev, int resume) { return 0; }
 static inline void intel_opregion_free(struct drm_device *dev, int suspend) { return; }
 static inline void opregion_asle_intr(struct drm_device *dev) { return; }
+static inline void ironlake_opregion_gse_intr(struct drm_device *dev) { return; }
 static inline void opregion_enable_asle(struct drm_device *dev) { return; }
 #endif
 
@@ -955,8 +985,8 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 #define IS_I830(dev) ((dev)->pci_device == 0x3577)
 #define IS_845G(dev) ((dev)->pci_device == 0x2562)
 #define IS_I85X(dev) ((dev)->pci_device == 0x3582)
-#define IS_I855(dev) ((dev)->pci_device == 0x3582)
 #define IS_I865G(dev) ((dev)->pci_device == 0x2572)
+#define IS_I8XX(dev) (IS_I830(dev) || IS_845G(dev) || IS_I85X(dev) || IS_I865G(dev))
 
 #define IS_I915G(dev) ((dev)->pci_device == 0x2582 || (dev)->pci_device == 0x258a)
 #define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
@@ -990,47 +1020,51 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
		     (dev)->pci_device == 0x2E42 || \
		     IS_GM45(dev))
 
-#define IS_IGDG(dev) ((dev)->pci_device == 0xa001)
-#define IS_IGDGM(dev) ((dev)->pci_device == 0xa011)
-#define IS_IGD(dev) (IS_IGDG(dev) || IS_IGDGM(dev))
+#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
+#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
+#define IS_PINEVIEW(dev) (IS_PINEVIEW_G(dev) || IS_PINEVIEW_M(dev))
 
 #define IS_G33(dev)    ((dev)->pci_device == 0x29C2 ||	\
			(dev)->pci_device == 0x29B2 ||	\
			(dev)->pci_device == 0x29D2 ||  \
-			(IS_IGD(dev)))
+			(IS_PINEVIEW(dev)))
 
-#define IS_IGDNG_D(dev) ((dev)->pci_device == 0x0042)
-#define IS_IGDNG_M(dev) ((dev)->pci_device == 0x0046)
-#define IS_IGDNG(dev) (IS_IGDNG_D(dev) || IS_IGDNG_M(dev))
+#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
+#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
+#define IS_IRONLAKE(dev) (IS_IRONLAKE_D(dev) || IS_IRONLAKE_M(dev))
 
 #define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
		      IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev) || \
-		      IS_IGDNG(dev))
+		      IS_IRONLAKE(dev))
 
 #define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
			IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev) || \
-			IS_IGD(dev) || IS_IGDNG_M(dev))
+			IS_PINEVIEW(dev) || IS_IRONLAKE_M(dev))
 
 #define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev) || \
-				IS_IGDNG(dev))
+				IS_IRONLAKE(dev))
 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
  * rows, which changed the alignment requirements and fence programming.
  */
 #define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \
						      IS_I915GM(dev)))
-#define SUPPORTS_INTEGRATED_HDMI(dev)	(IS_G4X(dev) || IS_IGDNG(dev))
-#define SUPPORTS_INTEGRATED_DP(dev)	(IS_G4X(dev) || IS_IGDNG(dev))
-#define SUPPORTS_EDP(dev)		(IS_IGDNG_M(dev))
+#define SUPPORTS_DIGITAL_OUTPUTS(dev)	(IS_I9XX(dev) && !IS_PINEVIEW(dev))
+#define SUPPORTS_INTEGRATED_HDMI(dev)	(IS_G4X(dev) || IS_IRONLAKE(dev))
+#define SUPPORTS_INTEGRATED_DP(dev)	(IS_G4X(dev) || IS_IRONLAKE(dev))
+#define SUPPORTS_EDP(dev)		(IS_IRONLAKE_M(dev))
+#define SUPPORTS_TV(dev)		(IS_I9XX(dev) && IS_MOBILE(dev) && \
+					 !IS_IRONLAKE(dev) && !IS_PINEVIEW(dev))
 #define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev) || IS_I965G(dev))
 /* dsparb controlled by hw only */
-#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IGDNG(dev))
+#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
 
-#define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IGDNG(dev))
-#define HAS_PIPE_CXSR(dev) (IS_G4X(dev) || IS_IGDNG(dev))
+#define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IRONLAKE(dev))
+#define HAS_PIPE_CXSR(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
 #define I915_HAS_FBC(dev) (IS_MOBILE(dev) && \
			   (IS_I9XX(dev) || IS_GM45(dev)) && \
-			   !IS_IGD(dev) && \
-			   !IS_IGDNG(dev))
+			   !IS_PINEVIEW(dev) && \
+			   !IS_IRONLAKE(dev))
+#define I915_HAS_RC6(dev) (IS_I965GM(dev) || IS_GM45(dev) || IS_IRONLAKE_M(dev))
 
 #define PRIMARY_RINGBUFFER_SIZE (128*1024)
 
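
The pending_flip counter documented above implements a drain-to-zero wake
pattern: each queued page flip bumps the count, and the flip-completion path
decrements it and wakes pending_flip_queue once the buffer is no longer
scanned out. The shape of the completion side, reduced to its essentials
(assumed field names, not the driver's exact code):

	static void flip_complete(atomic_t *pending_flip,
				  wait_queue_head_t *flip_queue)
	{
		/* Last outstanding flip for this buffer: wake anyone
		 * waiting to reuse it (e.g. execbuffer). */
		if (atomic_dec_and_test(pending_flip))
			wake_up(flip_queue);
	}
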
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index a2a3fa599923..8c463cf2050a 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1288,6 +1288,7 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj)
1288 list->hash.key = list->file_offset_node->start; 1288 list->hash.key = list->file_offset_node->start;
1289 if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) { 1289 if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
1290 DRM_ERROR("failed to add to map hash\n"); 1290 DRM_ERROR("failed to add to map hash\n");
1291 ret = -ENOMEM;
1291 goto out_free_mm; 1292 goto out_free_mm;
1292 } 1293 }
1293 1294
@@ -1583,7 +1584,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
1583 * 1584 *
1584 * Returned sequence numbers are nonzero on success. 1585 * Returned sequence numbers are nonzero on success.
1585 */ 1586 */
1586static uint32_t 1587uint32_t
1587i915_add_request(struct drm_device *dev, struct drm_file *file_priv, 1588i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
1588 uint32_t flush_domains) 1589 uint32_t flush_domains)
1589{ 1590{
@@ -1617,7 +1618,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
1617 OUT_RING(MI_USER_INTERRUPT); 1618 OUT_RING(MI_USER_INTERRUPT);
1618 ADVANCE_LP_RING(); 1619 ADVANCE_LP_RING();
1619 1620
1620 DRM_DEBUG("%d\n", seqno); 1621 DRM_DEBUG_DRIVER("%d\n", seqno);
1621 1622
1622 request->seqno = seqno; 1623 request->seqno = seqno;
1623 request->emitted_jiffies = jiffies; 1624 request->emitted_jiffies = jiffies;
@@ -1820,12 +1821,8 @@ i915_gem_retire_work_handler(struct work_struct *work)
1820 mutex_unlock(&dev->struct_mutex); 1821 mutex_unlock(&dev->struct_mutex);
1821} 1822}
1822 1823
1823/** 1824int
1824 * Waits for a sequence number to be signaled, and cleans up the 1825i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
1825 * request and object lists appropriately for that event.
1826 */
1827static int
1828i915_wait_request(struct drm_device *dev, uint32_t seqno)
1829{ 1826{
1830 drm_i915_private_t *dev_priv = dev->dev_private; 1827 drm_i915_private_t *dev_priv = dev->dev_private;
1831 u32 ier; 1828 u32 ier;
@@ -1837,7 +1834,7 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
1837 return -EIO; 1834 return -EIO;
1838 1835
1839 if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) { 1836 if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
1840 if (IS_IGDNG(dev)) 1837 if (IS_IRONLAKE(dev))
1841 ier = I915_READ(DEIER) | I915_READ(GTIER); 1838 ier = I915_READ(DEIER) | I915_READ(GTIER);
1842 else 1839 else
1843 ier = I915_READ(IER); 1840 ier = I915_READ(IER);
@@ -1852,10 +1849,15 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
1852 1849
1853 dev_priv->mm.waiting_gem_seqno = seqno; 1850 dev_priv->mm.waiting_gem_seqno = seqno;
1854 i915_user_irq_get(dev); 1851 i915_user_irq_get(dev);
1855 ret = wait_event_interruptible(dev_priv->irq_queue, 1852 if (interruptible)
1856 i915_seqno_passed(i915_get_gem_seqno(dev), 1853 ret = wait_event_interruptible(dev_priv->irq_queue,
1857 seqno) || 1854 i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
1858 atomic_read(&dev_priv->mm.wedged)); 1855 atomic_read(&dev_priv->mm.wedged));
1856 else
1857 wait_event(dev_priv->irq_queue,
1858 i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
1859 atomic_read(&dev_priv->mm.wedged));
1860
1859 i915_user_irq_put(dev); 1861 i915_user_irq_put(dev);
1860 dev_priv->mm.waiting_gem_seqno = 0; 1862 dev_priv->mm.waiting_gem_seqno = 0;
1861 1863
@@ -1879,6 +1881,16 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
1879 return ret; 1881 return ret;
1880} 1882}
1881 1883
1884/**
1885 * Waits for a sequence number to be signaled, and cleans up the
1886 * request and object lists appropriately for that event.
1887 */
1888static int
1889i915_wait_request(struct drm_device *dev, uint32_t seqno)
1890{
1891 return i915_do_wait_request(dev, seqno, 1);
1892}
1893
1882static void 1894static void
1883i915_gem_flush(struct drm_device *dev, 1895i915_gem_flush(struct drm_device *dev,
1884 uint32_t invalidate_domains, 1896 uint32_t invalidate_domains,
@@ -1947,7 +1959,7 @@ i915_gem_flush(struct drm_device *dev,
1947#endif 1959#endif
1948 BEGIN_LP_RING(2); 1960 BEGIN_LP_RING(2);
1949 OUT_RING(cmd); 1961 OUT_RING(cmd);
1950 OUT_RING(0); /* noop */ 1962 OUT_RING(MI_NOOP);
1951 ADVANCE_LP_RING(); 1963 ADVANCE_LP_RING();
1952 } 1964 }
1953} 1965}
@@ -2760,6 +2772,22 @@ i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
2760 old_write_domain); 2772 old_write_domain);
2761} 2773}
2762 2774
2775void
2776i915_gem_object_flush_write_domain(struct drm_gem_object *obj)
2777{
2778 switch (obj->write_domain) {
2779 case I915_GEM_DOMAIN_GTT:
2780 i915_gem_object_flush_gtt_write_domain(obj);
2781 break;
2782 case I915_GEM_DOMAIN_CPU:
2783 i915_gem_object_flush_cpu_write_domain(obj);
2784 break;
2785 default:
2786 i915_gem_object_flush_gpu_write_domain(obj);
2787 break;
2788 }
2789}
2790
2763/** 2791/**
2764 * Moves a single object to the GTT read, and possibly write domain. 2792 * Moves a single object to the GTT read, and possibly write domain.
2765 * 2793 *
@@ -3525,6 +3553,41 @@ i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer *exec,
3525 return 0; 3553 return 0;
3526} 3554}
3527 3555
3556static int
3557i915_gem_wait_for_pending_flip(struct drm_device *dev,
3558 struct drm_gem_object **object_list,
3559 int count)
3560{
3561 drm_i915_private_t *dev_priv = dev->dev_private;
3562 struct drm_i915_gem_object *obj_priv;
3563 DEFINE_WAIT(wait);
3564 int i, ret = 0;
3565
3566 for (;;) {
3567 prepare_to_wait(&dev_priv->pending_flip_queue,
3568 &wait, TASK_INTERRUPTIBLE);
3569 for (i = 0; i < count; i++) {
3570 obj_priv = object_list[i]->driver_private;
3571 if (atomic_read(&obj_priv->pending_flip) > 0)
3572 break;
3573 }
3574 if (i == count)
3575 break;
3576
3577 if (!signal_pending(current)) {
3578 mutex_unlock(&dev->struct_mutex);
3579 schedule();
3580 mutex_lock(&dev->struct_mutex);
3581 continue;
3582 }
3583 ret = -ERESTARTSYS;
3584 break;
3585 }
3586 finish_wait(&dev_priv->pending_flip_queue, &wait);
3587
3588 return ret;
3589}
3590
3528int 3591int
3529i915_gem_execbuffer(struct drm_device *dev, void *data, 3592i915_gem_execbuffer(struct drm_device *dev, void *data,
3530 struct drm_file *file_priv) 3593 struct drm_file *file_priv)
@@ -3540,7 +3603,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
3540 int ret, ret2, i, pinned = 0; 3603 int ret, ret2, i, pinned = 0;
3541 uint64_t exec_offset; 3604 uint64_t exec_offset;
3542 uint32_t seqno, flush_domains, reloc_index; 3605 uint32_t seqno, flush_domains, reloc_index;
3543 int pin_tries; 3606 int pin_tries, flips;
3544 3607
3545#if WATCH_EXEC 3608#if WATCH_EXEC
3546 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", 3609 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
@@ -3552,8 +3615,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
3552 return -EINVAL; 3615 return -EINVAL;
3553 } 3616 }
3554 /* Copy in the exec list from userland */ 3617 /* Copy in the exec list from userland */
3555 exec_list = drm_calloc_large(sizeof(*exec_list), args->buffer_count); 3618 exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
3556 object_list = drm_calloc_large(sizeof(*object_list), args->buffer_count); 3619 object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count);
3557 if (exec_list == NULL || object_list == NULL) { 3620 if (exec_list == NULL || object_list == NULL) {
3558 DRM_ERROR("Failed to allocate exec or object list " 3621 DRM_ERROR("Failed to allocate exec or object list "
3559 "for %d buffers\n", 3622 "for %d buffers\n",
@@ -3598,20 +3661,19 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
3598 i915_verify_inactive(dev, __FILE__, __LINE__); 3661 i915_verify_inactive(dev, __FILE__, __LINE__);
3599 3662
3600 if (atomic_read(&dev_priv->mm.wedged)) { 3663 if (atomic_read(&dev_priv->mm.wedged)) {
3601 DRM_ERROR("Execbuf while wedged\n");
3602 mutex_unlock(&dev->struct_mutex); 3664 mutex_unlock(&dev->struct_mutex);
3603 ret = -EIO; 3665 ret = -EIO;
3604 goto pre_mutex_err; 3666 goto pre_mutex_err;
3605 } 3667 }
3606 3668
3607 if (dev_priv->mm.suspended) { 3669 if (dev_priv->mm.suspended) {
3608 DRM_ERROR("Execbuf while VT-switched.\n");
3609 mutex_unlock(&dev->struct_mutex); 3670 mutex_unlock(&dev->struct_mutex);
3610 ret = -EBUSY; 3671 ret = -EBUSY;
3611 goto pre_mutex_err; 3672 goto pre_mutex_err;
3612 } 3673 }
3613 3674
3614 /* Look up object handles */ 3675 /* Look up object handles */
3676 flips = 0;
3615 for (i = 0; i < args->buffer_count; i++) { 3677 for (i = 0; i < args->buffer_count; i++) {
3616 object_list[i] = drm_gem_object_lookup(dev, file_priv, 3678 object_list[i] = drm_gem_object_lookup(dev, file_priv,
3617 exec_list[i].handle); 3679 exec_list[i].handle);
@@ -3630,6 +3692,14 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
3630 goto err; 3692 goto err;
3631 } 3693 }
3632 obj_priv->in_execbuffer = true; 3694 obj_priv->in_execbuffer = true;
3695 flips += atomic_read(&obj_priv->pending_flip);
3696 }
3697
3698 if (flips > 0) {
3699 ret = i915_gem_wait_for_pending_flip(dev, object_list,
3700 args->buffer_count);
3701 if (ret)
3702 goto err;
3633 } 3703 }
3634 3704
3635 /* Pin and relocate */ 3705 /* Pin and relocate */
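Note the ordering in the lookup loop above: flips accumulates atomic_read() of each object's pending_flip counter, and the blocking wait is entered only when the sum is non-zero, keeping the common no-flip case on the fast path. The same cheap pre-check in isolation (demo_obj is hypothetical; demo_wait_for_idle() refers to the wait-loop sketch after the first hunk of this file):

	struct demo_obj {
		atomic_t pending_flip;
	};

	static int demo_sync_objects(struct demo_obj **objs, int count)
	{
		int i, flips = 0;

		for (i = 0; i < count; i++)
			flips += atomic_read(&objs[i]->pending_flip);
		if (flips == 0)
			return 0;		/* fast path: nothing outstanding */
		return demo_wait_for_idle();	/* slow path: block until clear */
	}
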
@@ -4356,7 +4426,7 @@ i915_gem_init_hws(struct drm_device *dev)
4356 memset(dev_priv->hw_status_page, 0, PAGE_SIZE); 4426 memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
4357 I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr); 4427 I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
4358 I915_READ(HWS_PGA); /* posting read */ 4428 I915_READ(HWS_PGA); /* posting read */
4359 DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr); 4429 DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
4360 4430
4361 return 0; 4431 return 0;
4362} 4432}
@@ -4614,8 +4684,8 @@ i915_gem_load(struct drm_device *dev)
4614 for (i = 0; i < 8; i++) 4684 for (i = 0; i < 8; i++)
4615 I915_WRITE(FENCE_REG_945_8 + (i * 4), 0); 4685 I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
4616 } 4686 }
4617
4618 i915_gem_detect_bit_6_swizzle(dev); 4687 i915_gem_detect_bit_6_swizzle(dev);
4688 init_waitqueue_head(&dev_priv->pending_flip_queue);
4619} 4689}
4620 4690
4621/* 4691/*
@@ -4790,7 +4860,7 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
4790 user_data = (char __user *) (uintptr_t) args->data_ptr; 4860 user_data = (char __user *) (uintptr_t) args->data_ptr;
4791 obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset; 4861 obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
4792 4862
4793 DRM_DEBUG("obj_addr %p, %lld\n", obj_addr, args->size); 4863 DRM_DEBUG_DRIVER("obj_addr %p, %lld\n", obj_addr, args->size);
4794 ret = copy_from_user(obj_addr, user_data, args->size); 4864 ret = copy_from_user(obj_addr, user_data, args->size);
4795 if (ret) 4865 if (ret)
4796 return -EFAULT; 4866 return -EFAULT;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 200e398453ca..30d6af6c09bb 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -121,7 +121,7 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
121 0, pcibios_align_resource, 121 0, pcibios_align_resource,
122 dev_priv->bridge_dev); 122 dev_priv->bridge_dev);
123 if (ret) { 123 if (ret) {
124 DRM_DEBUG("failed bus alloc: %d\n", ret); 124 DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
125 dev_priv->mch_res.start = 0; 125 dev_priv->mch_res.start = 0;
126 goto out; 126 goto out;
127 } 127 }
@@ -209,8 +209,8 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
209 uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; 209 uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
210 bool need_disable; 210 bool need_disable;
211 211
212 if (IS_IGDNG(dev)) { 212 if (IS_IRONLAKE(dev)) {
 213 /* On IGDNG, whatever the DRAM config, the GPU always does 213 /* On Ironlake, whatever the DRAM config, the GPU always does
 214 * the same swizzling setup. 214 * the same swizzling setup.
215 */ 215 */
216 swizzle_x = I915_BIT_6_SWIZZLE_9_10; 216 swizzle_x = I915_BIT_6_SWIZZLE_9_10;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index aa7fd82aa6eb..85f4c5de97e2 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -43,10 +43,13 @@
43 * we leave them always unmasked in IMR and then control enabling them through 43 * we leave them always unmasked in IMR and then control enabling them through
44 * PIPESTAT alone. 44 * PIPESTAT alone.
45 */ 45 */
46#define I915_INTERRUPT_ENABLE_FIX (I915_ASLE_INTERRUPT | \ 46#define I915_INTERRUPT_ENABLE_FIX \
47 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \ 47 (I915_ASLE_INTERRUPT | \
48 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | \ 48 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
49 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 49 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | \
50 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | \
51 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | \
52 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
50 53
51/** Interrupts that we mask and unmask at runtime. */ 54/** Interrupts that we mask and unmask at runtime. */
52#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT) 55#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT)
@@ -61,7 +64,7 @@
61 DRM_I915_VBLANK_PIPE_B) 64 DRM_I915_VBLANK_PIPE_B)
62 65
63void 66void
64igdng_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask) 67ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
65{ 68{
66 if ((dev_priv->gt_irq_mask_reg & mask) != 0) { 69 if ((dev_priv->gt_irq_mask_reg & mask) != 0) {
67 dev_priv->gt_irq_mask_reg &= ~mask; 70 dev_priv->gt_irq_mask_reg &= ~mask;
@@ -71,7 +74,7 @@ igdng_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
71} 74}
72 75
73static inline void 76static inline void
74igdng_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask) 77ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
75{ 78{
76 if ((dev_priv->gt_irq_mask_reg & mask) != mask) { 79 if ((dev_priv->gt_irq_mask_reg & mask) != mask) {
77 dev_priv->gt_irq_mask_reg |= mask; 80 dev_priv->gt_irq_mask_reg |= mask;
@@ -82,7 +85,7 @@ igdng_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
82 85
83/* For display hotplug interrupt */ 86/* For display hotplug interrupt */
84void 87void
85igdng_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask) 88ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
86{ 89{
87 if ((dev_priv->irq_mask_reg & mask) != 0) { 90 if ((dev_priv->irq_mask_reg & mask) != 0) {
88 dev_priv->irq_mask_reg &= ~mask; 91 dev_priv->irq_mask_reg &= ~mask;
@@ -92,7 +95,7 @@ igdng_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
92} 95}
93 96
94static inline void 97static inline void
95igdng_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask) 98ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
96{ 99{
97 if ((dev_priv->irq_mask_reg & mask) != mask) { 100 if ((dev_priv->irq_mask_reg & mask) != mask) {
98 dev_priv->irq_mask_reg |= mask; 101 dev_priv->irq_mask_reg |= mask;
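The renamed enable/disable helpers above follow the shadow-mask idiom: a cached copy of the hardware mask register is updated first, and the device is touched only when a bit actually changes (the elided lines write the shadow back to the register and issue a posting read, as the preinstall/postinstall hunks below do explicitly). A sketch under an assumed MMIO offset:

	#include <linux/types.h>
	#include <linux/io.h>

	#define DEMO_IMR 0x4	/* hypothetical mask-register offset */

	static void demo_enable_irq(void __iomem *regs, u32 *shadow, u32 mask)
	{
		if ((*shadow & mask) != 0) {	/* some bit still masked off? */
			*shadow &= ~mask;
			writel(*shadow, regs + DEMO_IMR);
			(void) readl(regs + DEMO_IMR);	/* posting read */
		}
	}

	static void demo_disable_irq(void __iomem *regs, u32 *shadow, u32 mask)
	{
		if ((*shadow & mask) != mask) {	/* some bit still enabled? */
			*shadow |= mask;
			writel(*shadow, regs + DEMO_IMR);
			(void) readl(regs + DEMO_IMR);
		}
	}
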
@@ -157,6 +160,20 @@ i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
157} 160}
158 161
159/** 162/**
163 * intel_enable_asle - enable ASLE interrupt for OpRegion
164 */
165void intel_enable_asle (struct drm_device *dev)
166{
167 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
168
169 if (IS_IRONLAKE(dev))
170 ironlake_enable_display_irq(dev_priv, DE_GSE);
171 else
172 i915_enable_pipestat(dev_priv, 1,
173 I915_LEGACY_BLC_EVENT_ENABLE);
174}
175
176/**
160 * i915_pipe_enabled - check if a pipe is enabled 177 * i915_pipe_enabled - check if a pipe is enabled
161 * @dev: DRM device 178 * @dev: DRM device
162 * @pipe: pipe to check 179 * @pipe: pipe to check
@@ -191,7 +208,8 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
191 low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL; 208 low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
192 209
193 if (!i915_pipe_enabled(dev, pipe)) { 210 if (!i915_pipe_enabled(dev, pipe)) {
194 DRM_DEBUG("trying to get vblank count for disabled pipe %d\n", pipe); 211 DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
212 "pipe %d\n", pipe);
195 return 0; 213 return 0;
196 } 214 }
197 215
@@ -220,7 +238,8 @@ u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
220 int reg = pipe ? PIPEB_FRMCOUNT_GM45 : PIPEA_FRMCOUNT_GM45; 238 int reg = pipe ? PIPEB_FRMCOUNT_GM45 : PIPEA_FRMCOUNT_GM45;
221 239
222 if (!i915_pipe_enabled(dev, pipe)) { 240 if (!i915_pipe_enabled(dev, pipe)) {
223 DRM_DEBUG("trying to get vblank count for disabled pipe %d\n", pipe); 241 DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
242 "pipe %d\n", pipe);
224 return 0; 243 return 0;
225 } 244 }
226 245
@@ -250,12 +269,12 @@ static void i915_hotplug_work_func(struct work_struct *work)
250 drm_sysfs_hotplug_event(dev); 269 drm_sysfs_hotplug_event(dev);
251} 270}
252 271
253irqreturn_t igdng_irq_handler(struct drm_device *dev) 272irqreturn_t ironlake_irq_handler(struct drm_device *dev)
254{ 273{
255 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 274 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
256 int ret = IRQ_NONE; 275 int ret = IRQ_NONE;
257 u32 de_iir, gt_iir, de_ier; 276 u32 de_iir, gt_iir, de_ier, pch_iir;
258 u32 new_de_iir, new_gt_iir; 277 u32 new_de_iir, new_gt_iir, new_pch_iir;
259 struct drm_i915_master_private *master_priv; 278 struct drm_i915_master_private *master_priv;
260 279
261 /* disable master interrupt before clearing iir */ 280 /* disable master interrupt before clearing iir */
@@ -265,13 +284,18 @@ irqreturn_t igdng_irq_handler(struct drm_device *dev)
265 284
266 de_iir = I915_READ(DEIIR); 285 de_iir = I915_READ(DEIIR);
267 gt_iir = I915_READ(GTIIR); 286 gt_iir = I915_READ(GTIIR);
287 pch_iir = I915_READ(SDEIIR);
268 288
269 for (;;) { 289 for (;;) {
270 if (de_iir == 0 && gt_iir == 0) 290 if (de_iir == 0 && gt_iir == 0 && pch_iir == 0)
271 break; 291 break;
272 292
273 ret = IRQ_HANDLED; 293 ret = IRQ_HANDLED;
274 294
 295 /* clear the PCH hotplug event before clearing the CPU irq */
296 I915_WRITE(SDEIIR, pch_iir);
297 new_pch_iir = I915_READ(SDEIIR);
298
275 I915_WRITE(DEIIR, de_iir); 299 I915_WRITE(DEIIR, de_iir);
276 new_de_iir = I915_READ(DEIIR); 300 new_de_iir = I915_READ(DEIIR);
277 I915_WRITE(GTIIR, gt_iir); 301 I915_WRITE(GTIIR, gt_iir);
@@ -291,8 +315,18 @@ irqreturn_t igdng_irq_handler(struct drm_device *dev)
291 DRM_WAKEUP(&dev_priv->irq_queue); 315 DRM_WAKEUP(&dev_priv->irq_queue);
292 } 316 }
293 317
318 if (de_iir & DE_GSE)
319 ironlake_opregion_gse_intr(dev);
320
321 /* check event from PCH */
322 if ((de_iir & DE_PCH_EVENT) &&
323 (pch_iir & SDE_HOTPLUG_MASK)) {
324 queue_work(dev_priv->wq, &dev_priv->hotplug_work);
325 }
326
294 de_iir = new_de_iir; 327 de_iir = new_de_iir;
295 gt_iir = new_gt_iir; 328 gt_iir = new_gt_iir;
329 pch_iir = new_pch_iir;
296 } 330 }
297 331
298 I915_WRITE(DEIER, de_ier); 332 I915_WRITE(DEIER, de_ier);
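Two things to notice in the handler changes above: the interrupt identity registers are write-one-to-clear, so the loop acks exactly the bits it captured and re-reads to pick up events that latched meanwhile; and SDEIIR is cleared before DEIIR because the PCH hotplug status feeds the CPU-side DE_PCH_EVENT bit. A sketch of the ack-and-reread loop, with a hypothetical register offset and a stubbed dispatcher:

	#include <linux/types.h>
	#include <linux/io.h>

	#define DEMO_IIR 0x0	/* stand-in for DEIIR/GTIIR/SDEIIR */

	static void demo_handle_events(u32 iir)
	{
		/* dispatch on the captured bits; body elided */
	}

	static void demo_irq_loop(void __iomem *regs)
	{
		u32 iir, new_iir;

		iir = readl(regs + DEMO_IIR);
		while (iir) {
			/* write-to-clear: ack exactly what we captured */
			writel(iir, regs + DEMO_IIR);
			/* bits that latched meanwhile survive the ack */
			new_iir = readl(regs + DEMO_IIR);

			demo_handle_events(iir);
			iir = new_iir;
		}
	}
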
@@ -317,19 +351,19 @@ static void i915_error_work_func(struct work_struct *work)
317 char *reset_event[] = { "RESET=1", NULL }; 351 char *reset_event[] = { "RESET=1", NULL };
318 char *reset_done_event[] = { "ERROR=0", NULL }; 352 char *reset_done_event[] = { "ERROR=0", NULL };
319 353
320 DRM_DEBUG("generating error event\n"); 354 DRM_DEBUG_DRIVER("generating error event\n");
321 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); 355 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
322 356
323 if (atomic_read(&dev_priv->mm.wedged)) { 357 if (atomic_read(&dev_priv->mm.wedged)) {
324 if (IS_I965G(dev)) { 358 if (IS_I965G(dev)) {
325 DRM_DEBUG("resetting chip\n"); 359 DRM_DEBUG_DRIVER("resetting chip\n");
326 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event); 360 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
327 if (!i965_reset(dev, GDRST_RENDER)) { 361 if (!i965_reset(dev, GDRST_RENDER)) {
328 atomic_set(&dev_priv->mm.wedged, 0); 362 atomic_set(&dev_priv->mm.wedged, 0);
329 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event); 363 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
330 } 364 }
331 } else { 365 } else {
332 printk("reboot required\n"); 366 DRM_DEBUG_DRIVER("reboot required\n");
333 } 367 }
334 } 368 }
335} 369}
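The error worker above reports state to userspace through uevents carrying extra environment strings (error_event is defined just before this hunk and presumably carries "ERROR=1"). The pattern in isolation, assuming kobj is an already-registered kobject:

	#include <linux/kobject.h>

	static void demo_signal_error(struct kobject *kobj, int wedged)
	{
		char *error_event[]      = { "ERROR=1", NULL };
		char *reset_event[]      = { "RESET=1", NULL };
		char *reset_done_event[] = { "ERROR=0", NULL };

		kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
		if (wedged) {
			kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
			/* ... attempt the reset here ... */
			kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
		}
	}
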
@@ -355,7 +389,7 @@ static void i915_capture_error_state(struct drm_device *dev)
355 389
356 error = kmalloc(sizeof(*error), GFP_ATOMIC); 390 error = kmalloc(sizeof(*error), GFP_ATOMIC);
357 if (!error) { 391 if (!error) {
 358 DRM_DEBUG("out of memory, not capturing error state\n"); 392 DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
359 goto out; 393 goto out;
360 } 394 }
361 395
@@ -512,7 +546,6 @@ static void i915_handle_error(struct drm_device *dev, bool wedged)
512 /* 546 /*
513 * Wakeup waiting processes so they don't hang 547 * Wakeup waiting processes so they don't hang
514 */ 548 */
515 printk("i915: Waking up sleeping processes\n");
516 DRM_WAKEUP(&dev_priv->irq_queue); 549 DRM_WAKEUP(&dev_priv->irq_queue);
517 } 550 }
518 551
@@ -535,8 +568,8 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
535 568
536 atomic_inc(&dev_priv->irq_received); 569 atomic_inc(&dev_priv->irq_received);
537 570
538 if (IS_IGDNG(dev)) 571 if (IS_IRONLAKE(dev))
539 return igdng_irq_handler(dev); 572 return ironlake_irq_handler(dev);
540 573
541 iir = I915_READ(IIR); 574 iir = I915_READ(IIR);
542 575
@@ -568,14 +601,14 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
568 */ 601 */
569 if (pipea_stats & 0x8000ffff) { 602 if (pipea_stats & 0x8000ffff) {
570 if (pipea_stats & PIPE_FIFO_UNDERRUN_STATUS) 603 if (pipea_stats & PIPE_FIFO_UNDERRUN_STATUS)
571 DRM_DEBUG("pipe a underrun\n"); 604 DRM_DEBUG_DRIVER("pipe a underrun\n");
572 I915_WRITE(PIPEASTAT, pipea_stats); 605 I915_WRITE(PIPEASTAT, pipea_stats);
573 irq_received = 1; 606 irq_received = 1;
574 } 607 }
575 608
576 if (pipeb_stats & 0x8000ffff) { 609 if (pipeb_stats & 0x8000ffff) {
577 if (pipeb_stats & PIPE_FIFO_UNDERRUN_STATUS) 610 if (pipeb_stats & PIPE_FIFO_UNDERRUN_STATUS)
578 DRM_DEBUG("pipe b underrun\n"); 611 DRM_DEBUG_DRIVER("pipe b underrun\n");
579 I915_WRITE(PIPEBSTAT, pipeb_stats); 612 I915_WRITE(PIPEBSTAT, pipeb_stats);
580 irq_received = 1; 613 irq_received = 1;
581 } 614 }
@@ -591,7 +624,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
591 (iir & I915_DISPLAY_PORT_INTERRUPT)) { 624 (iir & I915_DISPLAY_PORT_INTERRUPT)) {
592 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 625 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
593 626
594 DRM_DEBUG("hotplug event received, stat 0x%08x\n", 627 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
595 hotplug_status); 628 hotplug_status);
596 if (hotplug_status & dev_priv->hotplug_supported_mask) 629 if (hotplug_status & dev_priv->hotplug_supported_mask)
597 queue_work(dev_priv->wq, 630 queue_work(dev_priv->wq,
@@ -599,27 +632,6 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
599 632
600 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 633 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
601 I915_READ(PORT_HOTPLUG_STAT); 634 I915_READ(PORT_HOTPLUG_STAT);
602
603 /* EOS interrupts occurs */
604 if (IS_IGD(dev) &&
605 (hotplug_status & CRT_EOS_INT_STATUS)) {
606 u32 temp;
607
608 DRM_DEBUG("EOS interrupt occurs\n");
609 /* status is already cleared */
610 temp = I915_READ(ADPA);
611 temp &= ~ADPA_DAC_ENABLE;
612 I915_WRITE(ADPA, temp);
613
614 temp = I915_READ(PORT_HOTPLUG_EN);
615 temp &= ~CRT_EOS_INT_EN;
616 I915_WRITE(PORT_HOTPLUG_EN, temp);
617
618 temp = I915_READ(PORT_HOTPLUG_STAT);
619 if (temp & CRT_EOS_INT_STATUS)
620 I915_WRITE(PORT_HOTPLUG_STAT,
621 CRT_EOS_INT_STATUS);
622 }
623 } 635 }
624 636
625 I915_WRITE(IIR, iir); 637 I915_WRITE(IIR, iir);
@@ -641,14 +653,22 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
641 mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); 653 mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
642 } 654 }
643 655
656 if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
657 intel_prepare_page_flip(dev, 0);
658
659 if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT)
660 intel_prepare_page_flip(dev, 1);
661
644 if (pipea_stats & vblank_status) { 662 if (pipea_stats & vblank_status) {
645 vblank++; 663 vblank++;
646 drm_handle_vblank(dev, 0); 664 drm_handle_vblank(dev, 0);
665 intel_finish_page_flip(dev, 0);
647 } 666 }
648 667
649 if (pipeb_stats & vblank_status) { 668 if (pipeb_stats & vblank_status) {
650 vblank++; 669 vblank++;
651 drm_handle_vblank(dev, 1); 670 drm_handle_vblank(dev, 1);
671 intel_finish_page_flip(dev, 1);
652 } 672 }
653 673
654 if ((pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) || 674 if ((pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
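Page flips are retired in two phases above: the plane's flip-pending interrupt calls intel_prepare_page_flip(), and the following vblank on that pipe calls intel_finish_page_flip(), which is what ultimately decrements pending_flip and wakes the execbuffer waiters. A sketch of that per-pipe bookkeeping with assumed structures (the driver keeps the real state on the CRTC):

	#include <linux/wait.h>
	#include <asm/atomic.h>

	struct demo_pipe {
		int flip_armed;			/* set at flip-pending, cleared at vblank */
		atomic_t pending_flip;		/* the counter execbuffer sums */
		wait_queue_head_t *flip_queue;	/* e.g. the new pending_flip_queue */
	};

	static void demo_prepare_page_flip(struct demo_pipe *p)
	{
		p->flip_armed = 1;		/* hardware latched the new base */
	}

	static void demo_finish_page_flip(struct demo_pipe *p)
	{
		if (!p->flip_armed)
			return;			/* ordinary vblank, no flip queued */
		p->flip_armed = 0;
		if (atomic_dec_and_test(&p->pending_flip))
			wake_up(p->flip_queue);	/* unblock the waiters */
	}
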
@@ -684,7 +704,7 @@ static int i915_emit_irq(struct drm_device * dev)
684 704
685 i915_kernel_lost_context(dev); 705 i915_kernel_lost_context(dev);
686 706
687 DRM_DEBUG("\n"); 707 DRM_DEBUG_DRIVER("\n");
688 708
689 dev_priv->counter++; 709 dev_priv->counter++;
690 if (dev_priv->counter > 0x7FFFFFFFUL) 710 if (dev_priv->counter > 0x7FFFFFFFUL)
@@ -709,8 +729,8 @@ void i915_user_irq_get(struct drm_device *dev)
709 729
710 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 730 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
711 if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) { 731 if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) {
712 if (IS_IGDNG(dev)) 732 if (IS_IRONLAKE(dev))
713 igdng_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT); 733 ironlake_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
714 else 734 else
715 i915_enable_irq(dev_priv, I915_USER_INTERRUPT); 735 i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
716 } 736 }
@@ -725,8 +745,8 @@ void i915_user_irq_put(struct drm_device *dev)
725 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 745 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
726 BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0); 746 BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
727 if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) { 747 if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
728 if (IS_IGDNG(dev)) 748 if (IS_IRONLAKE(dev))
729 igdng_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT); 749 ironlake_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
730 else 750 else
731 i915_disable_irq(dev_priv, I915_USER_INTERRUPT); 751 i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
732 } 752 }
@@ -749,7 +769,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
749 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; 769 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
750 int ret = 0; 770 int ret = 0;
751 771
752 DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr, 772 DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
753 READ_BREADCRUMB(dev_priv)); 773 READ_BREADCRUMB(dev_priv));
754 774
755 if (READ_BREADCRUMB(dev_priv) >= irq_nr) { 775 if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
@@ -832,7 +852,7 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
832 if (!(pipeconf & PIPEACONF_ENABLE)) 852 if (!(pipeconf & PIPEACONF_ENABLE))
833 return -EINVAL; 853 return -EINVAL;
834 854
835 if (IS_IGDNG(dev)) 855 if (IS_IRONLAKE(dev))
836 return 0; 856 return 0;
837 857
838 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 858 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
@@ -854,7 +874,7 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
854 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 874 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
855 unsigned long irqflags; 875 unsigned long irqflags;
856 876
857 if (IS_IGDNG(dev)) 877 if (IS_IRONLAKE(dev))
858 return; 878 return;
859 879
860 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 880 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
@@ -868,7 +888,7 @@ void i915_enable_interrupt (struct drm_device *dev)
868{ 888{
869 struct drm_i915_private *dev_priv = dev->dev_private; 889 struct drm_i915_private *dev_priv = dev->dev_private;
870 890
871 if (!IS_IGDNG(dev)) 891 if (!IS_IRONLAKE(dev))
872 opregion_enable_asle(dev); 892 opregion_enable_asle(dev);
873 dev_priv->irq_enabled = 1; 893 dev_priv->irq_enabled = 1;
874} 894}
@@ -976,7 +996,7 @@ void i915_hangcheck_elapsed(unsigned long data)
976 996
977/* drm_dma.h hooks 997/* drm_dma.h hooks
978*/ 998*/
979static void igdng_irq_preinstall(struct drm_device *dev) 999static void ironlake_irq_preinstall(struct drm_device *dev)
980{ 1000{
981 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1001 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
982 1002
@@ -992,14 +1012,21 @@ static void igdng_irq_preinstall(struct drm_device *dev)
992 I915_WRITE(GTIMR, 0xffffffff); 1012 I915_WRITE(GTIMR, 0xffffffff);
993 I915_WRITE(GTIER, 0x0); 1013 I915_WRITE(GTIER, 0x0);
994 (void) I915_READ(GTIER); 1014 (void) I915_READ(GTIER);
1015
1016 /* south display irq */
1017 I915_WRITE(SDEIMR, 0xffffffff);
1018 I915_WRITE(SDEIER, 0x0);
1019 (void) I915_READ(SDEIER);
995} 1020}
996 1021
997static int igdng_irq_postinstall(struct drm_device *dev) 1022static int ironlake_irq_postinstall(struct drm_device *dev)
998{ 1023{
999 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1024 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1000 /* enable the kind of interrupts that are always enabled */ 1025 /* enable the kind of interrupts that are always enabled */
1001 u32 display_mask = DE_MASTER_IRQ_CONTROL /*| DE_PCH_EVENT */; 1026 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT;
1002 u32 render_mask = GT_USER_INTERRUPT; 1027 u32 render_mask = GT_USER_INTERRUPT;
1028 u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
1029 SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
1003 1030
1004 dev_priv->irq_mask_reg = ~display_mask; 1031 dev_priv->irq_mask_reg = ~display_mask;
1005 dev_priv->de_irq_enable_reg = display_mask; 1032 dev_priv->de_irq_enable_reg = display_mask;
@@ -1019,6 +1046,14 @@ static int igdng_irq_postinstall(struct drm_device *dev)
1019 I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg); 1046 I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg);
1020 (void) I915_READ(GTIER); 1047 (void) I915_READ(GTIER);
1021 1048
1049 dev_priv->pch_irq_mask_reg = ~hotplug_mask;
1050 dev_priv->pch_irq_enable_reg = hotplug_mask;
1051
1052 I915_WRITE(SDEIIR, I915_READ(SDEIIR));
1053 I915_WRITE(SDEIMR, dev_priv->pch_irq_mask_reg);
1054 I915_WRITE(SDEIER, dev_priv->pch_irq_enable_reg);
1055 (void) I915_READ(SDEIER);
1056
1022 return 0; 1057 return 0;
1023} 1058}
1024 1059
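The postinstall code for the new south-display block follows a fixed order: ack any stale status bits, program the mask, program the enable, then flush with a posting read. The same sequence under hypothetical offsets standing in for SDEIIR/SDEIMR/SDEIER:

	#include <linux/types.h>
	#include <linux/io.h>

	#define DEMO_BLK_IIR 0x0
	#define DEMO_BLK_IMR 0x4
	#define DEMO_BLK_IER 0x8

	static void demo_irq_enable_block(void __iomem *regs, u32 wanted)
	{
		/* ack anything stale before unmasking */
		writel(readl(regs + DEMO_BLK_IIR), regs + DEMO_BLK_IIR);
		/* unmask only the wanted bits... */
		writel(~wanted, regs + DEMO_BLK_IMR);
		/* ...then enable them */
		writel(wanted, regs + DEMO_BLK_IER);
		/* posting read flushes the writes */
		(void) readl(regs + DEMO_BLK_IER);
	}
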
@@ -1031,8 +1066,8 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
1031 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); 1066 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
1032 INIT_WORK(&dev_priv->error_work, i915_error_work_func); 1067 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
1033 1068
1034 if (IS_IGDNG(dev)) { 1069 if (IS_IRONLAKE(dev)) {
1035 igdng_irq_preinstall(dev); 1070 ironlake_irq_preinstall(dev);
1036 return; 1071 return;
1037 } 1072 }
1038 1073
@@ -1059,8 +1094,8 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
1059 1094
1060 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; 1095 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
1061 1096
1062 if (IS_IGDNG(dev)) 1097 if (IS_IRONLAKE(dev))
1063 return igdng_irq_postinstall(dev); 1098 return ironlake_irq_postinstall(dev);
1064 1099
1065 /* Unmask the interrupts that we always want on. */ 1100 /* Unmask the interrupts that we always want on. */
1066 dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX; 1101 dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX;
@@ -1120,7 +1155,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
1120 return 0; 1155 return 0;
1121} 1156}
1122 1157
1123static void igdng_irq_uninstall(struct drm_device *dev) 1158static void ironlake_irq_uninstall(struct drm_device *dev)
1124{ 1159{
1125 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1160 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1126 I915_WRITE(HWSTAM, 0xffffffff); 1161 I915_WRITE(HWSTAM, 0xffffffff);
@@ -1143,8 +1178,8 @@ void i915_driver_irq_uninstall(struct drm_device * dev)
1143 1178
1144 dev_priv->vblank_pipe = 0; 1179 dev_priv->vblank_pipe = 0;
1145 1180
1146 if (IS_IGDNG(dev)) { 1181 if (IS_IRONLAKE(dev)) {
1147 igdng_irq_uninstall(dev); 1182 ironlake_irq_uninstall(dev);
1148 return; 1183 return;
1149 } 1184 }
1150 1185
diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/i915_opregion.c
index 2d5193556d3f..7cc8410239cb 100644
--- a/drivers/gpu/drm/i915/i915_opregion.c
+++ b/drivers/gpu/drm/i915/i915_opregion.c
@@ -118,6 +118,10 @@ struct opregion_asle {
118#define ASLE_BACKLIGHT_FAIL (2<<12) 118#define ASLE_BACKLIGHT_FAIL (2<<12)
119#define ASLE_PFIT_FAIL (2<<14) 119#define ASLE_PFIT_FAIL (2<<14)
120#define ASLE_PWM_FREQ_FAIL (2<<16) 120#define ASLE_PWM_FREQ_FAIL (2<<16)
121#define ASLE_ALS_ILLUM_FAILED (1<<10)
122#define ASLE_BACKLIGHT_FAILED (1<<12)
123#define ASLE_PFIT_FAILED (1<<14)
124#define ASLE_PWM_FREQ_FAILED (1<<16)
121 125
122/* ASLE backlight brightness to set */ 126/* ASLE backlight brightness to set */
123#define ASLE_BCLP_VALID (1<<31) 127#define ASLE_BCLP_VALID (1<<31)
@@ -163,7 +167,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
163 if (IS_I965G(dev) && (blc_pwm_ctl2 & BLM_COMBINATION_MODE)) 167 if (IS_I965G(dev) && (blc_pwm_ctl2 & BLM_COMBINATION_MODE))
164 pci_write_config_dword(dev->pdev, PCI_LBPC, bclp); 168 pci_write_config_dword(dev->pdev, PCI_LBPC, bclp);
165 else { 169 else {
166 if (IS_IGD(dev)) { 170 if (IS_PINEVIEW(dev)) {
167 blc_pwm_ctl &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1); 171 blc_pwm_ctl &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
168 max_backlight = (blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >> 172 max_backlight = (blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >>
169 BACKLIGHT_MODULATION_FREQ_SHIFT; 173 BACKLIGHT_MODULATION_FREQ_SHIFT;
@@ -224,7 +228,7 @@ void opregion_asle_intr(struct drm_device *dev)
224 asle_req = asle->aslc & ASLE_REQ_MSK; 228 asle_req = asle->aslc & ASLE_REQ_MSK;
225 229
226 if (!asle_req) { 230 if (!asle_req) {
227 DRM_DEBUG("non asle set request??\n"); 231 DRM_DEBUG_DRIVER("non asle set request??\n");
228 return; 232 return;
229 } 233 }
230 234
@@ -243,6 +247,73 @@ void opregion_asle_intr(struct drm_device *dev)
243 asle->aslc = asle_stat; 247 asle->aslc = asle_stat;
244} 248}
245 249
250static u32 asle_set_backlight_ironlake(struct drm_device *dev, u32 bclp)
251{
252 struct drm_i915_private *dev_priv = dev->dev_private;
253 struct opregion_asle *asle = dev_priv->opregion.asle;
254 u32 cpu_pwm_ctl, pch_pwm_ctl2;
255 u32 max_backlight, level;
256
257 if (!(bclp & ASLE_BCLP_VALID))
258 return ASLE_BACKLIGHT_FAILED;
259
260 bclp &= ASLE_BCLP_MSK;
261 if (bclp < 0 || bclp > 255)
262 return ASLE_BACKLIGHT_FAILED;
263
264 cpu_pwm_ctl = I915_READ(BLC_PWM_CPU_CTL);
265 pch_pwm_ctl2 = I915_READ(BLC_PWM_PCH_CTL2);
 266 /* get the max backlight PWM level */
 267 max_backlight = (pch_pwm_ctl2 >> 16) & BACKLIGHT_DUTY_CYCLE_MASK;
 268 /* calculate the expected PWM level */
 269 level = (bclp * max_backlight) / 255;
 270 /* preserve the high 16 bits */
 271 cpu_pwm_ctl &= ~(BACKLIGHT_DUTY_CYCLE_MASK);
 272 /* write the updated PWM duty cycle */
 273 I915_WRITE(BLC_PWM_CPU_CTL, cpu_pwm_ctl | level);
274
275 asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID;
276
277 return 0;
278}
279
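The two scalings in asle_set_backlight_ironlake() are plain integer arithmetic: the 0-255 firmware request is scaled into the hardware's maximum duty-cycle value, and the feedback written to cblv is the same request expressed as a 0-100 percentage (0x64 is 100 decimal). In isolation:

	#include <linux/types.h>

	static u32 demo_backlight_level(u32 bclp, u32 max_backlight)
	{
		return (bclp * max_backlight) / 255;
	}

	static u32 demo_backlight_percent(u32 bclp)
	{
		return (bclp * 0x64) / 0xff;
	}

For bclp == 128 and max_backlight == 0x1000 this gives a level of 0x808 and a reported 50 percent.
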
280void ironlake_opregion_gse_intr(struct drm_device *dev)
281{
282 struct drm_i915_private *dev_priv = dev->dev_private;
283 struct opregion_asle *asle = dev_priv->opregion.asle;
284 u32 asle_stat = 0;
285 u32 asle_req;
286
287 if (!asle)
288 return;
289
290 asle_req = asle->aslc & ASLE_REQ_MSK;
291
292 if (!asle_req) {
293 DRM_DEBUG_DRIVER("non asle set request??\n");
294 return;
295 }
296
297 if (asle_req & ASLE_SET_ALS_ILLUM) {
298 DRM_DEBUG_DRIVER("Illum is not supported\n");
299 asle_stat |= ASLE_ALS_ILLUM_FAILED;
300 }
301
302 if (asle_req & ASLE_SET_BACKLIGHT)
303 asle_stat |= asle_set_backlight_ironlake(dev, asle->bclp);
304
305 if (asle_req & ASLE_SET_PFIT) {
306 DRM_DEBUG_DRIVER("Pfit is not supported\n");
307 asle_stat |= ASLE_PFIT_FAILED;
308 }
309
310 if (asle_req & ASLE_SET_PWM_FREQ) {
311 DRM_DEBUG_DRIVER("PWM freq is not supported\n");
312 asle_stat |= ASLE_PWM_FREQ_FAILED;
313 }
314
315 asle->aslc = asle_stat;
316}
246#define ASLE_ALS_EN (1<<0) 317#define ASLE_ALS_EN (1<<0)
247#define ASLE_BLC_EN (1<<1) 318#define ASLE_BLC_EN (1<<1)
248#define ASLE_PFIT_EN (1<<2) 319#define ASLE_PFIT_EN (1<<2)
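Both ASLE interrupt handlers implement the same mailbox handshake: read the request bits out of aslc, service each one, accumulate a failure bit per unsupported or failed request, and write the combined status back so the firmware can read the outcome. A sketch with a hypothetical mailbox struct and bit values mirroring the fields used above:

	#include <linux/types.h>

	struct demo_asle {
		u32 aslc;	/* request bits in, status bits out */
		u32 bclp;	/* backlight request */
	};

	#define DEMO_REQ_MSK       0xffff	/* assumed request mask */
	#define DEMO_SET_BACKLIGHT (1 << 1)
	#define DEMO_BL_FAILED     (1 << 12)

	static u32 demo_set_backlight(struct demo_asle *asle)
	{
		/* validate and apply asle->bclp; body elided */
		return 0;	/* or DEMO_BL_FAILED */
	}

	static void demo_asle_intr(struct demo_asle *asle)
	{
		u32 req = asle->aslc & DEMO_REQ_MSK;
		u32 stat = 0;

		if (!req)
			return;		/* no request bits set */
		if (req & DEMO_SET_BACKLIGHT)
			stat |= demo_set_backlight(asle);
		asle->aslc = stat;	/* firmware reads the outcome here */
	}
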
@@ -258,8 +329,7 @@ void opregion_enable_asle(struct drm_device *dev)
258 unsigned long irqflags; 329 unsigned long irqflags;
259 330
260 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 331 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
261 i915_enable_pipestat(dev_priv, 1, 332 intel_enable_asle(dev);
262 I915_LEGACY_BLC_EVENT_ENABLE);
263 spin_unlock_irqrestore(&dev_priv->user_irq_lock, 333 spin_unlock_irqrestore(&dev_priv->user_irq_lock,
264 irqflags); 334 irqflags);
265 } 335 }
@@ -361,9 +431,9 @@ int intel_opregion_init(struct drm_device *dev, int resume)
361 int err = 0; 431 int err = 0;
362 432
363 pci_read_config_dword(dev->pdev, PCI_ASLS, &asls); 433 pci_read_config_dword(dev->pdev, PCI_ASLS, &asls);
364 DRM_DEBUG("graphic opregion physical addr: 0x%x\n", asls); 434 DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls);
365 if (asls == 0) { 435 if (asls == 0) {
366 DRM_DEBUG("ACPI OpRegion not supported!\n"); 436 DRM_DEBUG_DRIVER("ACPI OpRegion not supported!\n");
367 return -ENOTSUPP; 437 return -ENOTSUPP;
368 } 438 }
369 439
@@ -373,30 +443,30 @@ int intel_opregion_init(struct drm_device *dev, int resume)
373 443
374 opregion->header = base; 444 opregion->header = base;
375 if (memcmp(opregion->header->signature, OPREGION_SIGNATURE, 16)) { 445 if (memcmp(opregion->header->signature, OPREGION_SIGNATURE, 16)) {
376 DRM_DEBUG("opregion signature mismatch\n"); 446 DRM_DEBUG_DRIVER("opregion signature mismatch\n");
377 err = -EINVAL; 447 err = -EINVAL;
378 goto err_out; 448 goto err_out;
379 } 449 }
380 450
381 mboxes = opregion->header->mboxes; 451 mboxes = opregion->header->mboxes;
382 if (mboxes & MBOX_ACPI) { 452 if (mboxes & MBOX_ACPI) {
383 DRM_DEBUG("Public ACPI methods supported\n"); 453 DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
384 opregion->acpi = base + OPREGION_ACPI_OFFSET; 454 opregion->acpi = base + OPREGION_ACPI_OFFSET;
385 if (drm_core_check_feature(dev, DRIVER_MODESET)) 455 if (drm_core_check_feature(dev, DRIVER_MODESET))
386 intel_didl_outputs(dev); 456 intel_didl_outputs(dev);
387 } else { 457 } else {
388 DRM_DEBUG("Public ACPI methods not supported\n"); 458 DRM_DEBUG_DRIVER("Public ACPI methods not supported\n");
389 err = -ENOTSUPP; 459 err = -ENOTSUPP;
390 goto err_out; 460 goto err_out;
391 } 461 }
392 opregion->enabled = 1; 462 opregion->enabled = 1;
393 463
394 if (mboxes & MBOX_SWSCI) { 464 if (mboxes & MBOX_SWSCI) {
395 DRM_DEBUG("SWSCI supported\n"); 465 DRM_DEBUG_DRIVER("SWSCI supported\n");
396 opregion->swsci = base + OPREGION_SWSCI_OFFSET; 466 opregion->swsci = base + OPREGION_SWSCI_OFFSET;
397 } 467 }
398 if (mboxes & MBOX_ASLE) { 468 if (mboxes & MBOX_ASLE) {
399 DRM_DEBUG("ASLE supported\n"); 469 DRM_DEBUG_DRIVER("ASLE supported\n");
400 opregion->asle = base + OPREGION_ASLE_OFFSET; 470 opregion->asle = base + OPREGION_ASLE_OFFSET;
401 opregion_enable_asle(dev); 471 opregion_enable_asle(dev);
402 } 472 }
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 1687edf68795..974b3cf70618 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -140,6 +140,7 @@
140#define MI_NOOP MI_INSTR(0, 0) 140#define MI_NOOP MI_INSTR(0, 0)
141#define MI_USER_INTERRUPT MI_INSTR(0x02, 0) 141#define MI_USER_INTERRUPT MI_INSTR(0x02, 0)
142#define MI_WAIT_FOR_EVENT MI_INSTR(0x03, 0) 142#define MI_WAIT_FOR_EVENT MI_INSTR(0x03, 0)
143#define MI_WAIT_FOR_OVERLAY_FLIP (1<<16)
143#define MI_WAIT_FOR_PLANE_B_FLIP (1<<6) 144#define MI_WAIT_FOR_PLANE_B_FLIP (1<<6)
144#define MI_WAIT_FOR_PLANE_A_FLIP (1<<2) 145#define MI_WAIT_FOR_PLANE_A_FLIP (1<<2)
145#define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1) 146#define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1)
@@ -151,7 +152,13 @@
151#define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */ 152#define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */
152#define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0) 153#define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0)
153#define MI_REPORT_HEAD MI_INSTR(0x07, 0) 154#define MI_REPORT_HEAD MI_INSTR(0x07, 0)
155#define MI_OVERLAY_FLIP MI_INSTR(0x11,0)
156#define MI_OVERLAY_CONTINUE (0x0<<21)
157#define MI_OVERLAY_ON (0x1<<21)
158#define MI_OVERLAY_OFF (0x2<<21)
154#define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0) 159#define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0)
160#define MI_DISPLAY_FLIP MI_INSTR(0x14, 2)
161#define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20)
155#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1) 162#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
156#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */ 163#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */
157#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1) 164#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
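MI_INSTR() packs the command opcode at bit 23 with the extra-dword count in the low bits, so MI_DISPLAY_FLIP above is opcode 0x14 followed by two payload dwords, with the target plane selected at bit 20. A sketch of composing such a packet; the payload order shown (pitch, then base address) is an assumption for illustration, not taken from this patch:

	#include <linux/types.h>

	#define DEMO_MI_INSTR(opcode, flags)	(((opcode) << 23) | (flags))
	#define DEMO_MI_DISPLAY_FLIP		DEMO_MI_INSTR(0x14, 2)
	#define DEMO_MI_DISPLAY_FLIP_PLANE(n)	((n) << 20)

	static void demo_emit_flip(u32 *batch, int plane, u32 pitch, u32 base)
	{
		batch[0] = DEMO_MI_DISPLAY_FLIP | DEMO_MI_DISPLAY_FLIP_PLANE(plane);
		batch[1] = pitch;	/* assumed payload dword 0 */
		batch[2] = base;	/* assumed payload dword 1 */
	}
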
@@ -260,6 +267,8 @@
260#define HWS_PGA 0x02080 267#define HWS_PGA 0x02080
261#define HWS_ADDRESS_MASK 0xfffff000 268#define HWS_ADDRESS_MASK 0xfffff000
262#define HWS_START_ADDRESS_SHIFT 4 269#define HWS_START_ADDRESS_SHIFT 4
270#define PWRCTXA 0x2088 /* 965GM+ only */
271#define PWRCTX_EN (1<<0)
263#define IPEIR 0x02088 272#define IPEIR 0x02088
264#define IPEHR 0x0208c 273#define IPEHR 0x0208c
265#define INSTDONE 0x02090 274#define INSTDONE 0x02090
@@ -405,6 +414,13 @@
405# define GPIO_DATA_VAL_IN (1 << 12) 414# define GPIO_DATA_VAL_IN (1 << 12)
406# define GPIO_DATA_PULLUP_DISABLE (1 << 13) 415# define GPIO_DATA_PULLUP_DISABLE (1 << 13)
407 416
417#define GMBUS0 0x5100
418#define GMBUS1 0x5104
419#define GMBUS2 0x5108
420#define GMBUS3 0x510c
421#define GMBUS4 0x5110
422#define GMBUS5 0x5120
423
408/* 424/*
409 * Clock control & power management 425 * Clock control & power management
410 */ 426 */
@@ -435,7 +451,7 @@
435#define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */ 451#define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */
436#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */ 452#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
437#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */ 453#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
438#define DPLL_FPA01_P1_POST_DIV_MASK_IGD 0x00ff8000 /* IGD */ 454#define DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */
439 455
440#define I915_FIFO_UNDERRUN_STATUS (1UL<<31) 456#define I915_FIFO_UNDERRUN_STATUS (1UL<<31)
441#define I915_CRC_ERROR_ENABLE (1UL<<29) 457#define I915_CRC_ERROR_ENABLE (1UL<<29)
@@ -512,7 +528,7 @@
512 */ 528 */
513#define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000 529#define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000
514#define DPLL_FPA01_P1_POST_DIV_SHIFT 16 530#define DPLL_FPA01_P1_POST_DIV_SHIFT 16
515#define DPLL_FPA01_P1_POST_DIV_SHIFT_IGD 15 531#define DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW 15
516/* i830, required in DVO non-gang */ 532/* i830, required in DVO non-gang */
517#define PLL_P2_DIVIDE_BY_4 (1 << 23) 533#define PLL_P2_DIVIDE_BY_4 (1 << 23)
518#define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */ 534#define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */
@@ -522,7 +538,7 @@
522#define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13) 538#define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13)
523#define PLL_REF_INPUT_MASK (3 << 13) 539#define PLL_REF_INPUT_MASK (3 << 13)
524#define PLL_LOAD_PULSE_PHASE_SHIFT 9 540#define PLL_LOAD_PULSE_PHASE_SHIFT 9
525/* IGDNG */ 541/* Ironlake */
526# define PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT 9 542# define PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT 9
527# define PLL_REF_SDVO_HDMI_MULTIPLIER_MASK (7 << 9) 543# define PLL_REF_SDVO_HDMI_MULTIPLIER_MASK (7 << 9)
528# define PLL_REF_SDVO_HDMI_MULTIPLIER(x) (((x)-1) << 9) 544# define PLL_REF_SDVO_HDMI_MULTIPLIER(x) (((x)-1) << 9)
@@ -586,12 +602,12 @@
586#define FPB0 0x06048 602#define FPB0 0x06048
587#define FPB1 0x0604c 603#define FPB1 0x0604c
588#define FP_N_DIV_MASK 0x003f0000 604#define FP_N_DIV_MASK 0x003f0000
589#define FP_N_IGD_DIV_MASK 0x00ff0000 605#define FP_N_PINEVIEW_DIV_MASK 0x00ff0000
590#define FP_N_DIV_SHIFT 16 606#define FP_N_DIV_SHIFT 16
591#define FP_M1_DIV_MASK 0x00003f00 607#define FP_M1_DIV_MASK 0x00003f00
592#define FP_M1_DIV_SHIFT 8 608#define FP_M1_DIV_SHIFT 8
593#define FP_M2_DIV_MASK 0x0000003f 609#define FP_M2_DIV_MASK 0x0000003f
594#define FP_M2_IGD_DIV_MASK 0x000000ff 610#define FP_M2_PINEVIEW_DIV_MASK 0x000000ff
595#define FP_M2_DIV_SHIFT 0 611#define FP_M2_DIV_SHIFT 0
596#define DPLL_TEST 0x606c 612#define DPLL_TEST 0x606c
597#define DPLLB_TEST_SDVO_DIV_1 (0 << 22) 613#define DPLLB_TEST_SDVO_DIV_1 (0 << 22)
@@ -769,7 +785,8 @@
769 785
770/** GM965 GM45 render standby register */ 786/** GM965 GM45 render standby register */
771#define MCHBAR_RENDER_STANDBY 0x111B8 787#define MCHBAR_RENDER_STANDBY 0x111B8
772 788#define RCX_SW_EXIT (1<<23)
789#define RSX_STATUS_MASK 0x00700000
773#define PEG_BAND_GAP_DATA 0x14d68 790#define PEG_BAND_GAP_DATA 0x14d68
774 791
775/* 792/*
@@ -844,7 +861,6 @@
844#define SDVOB_HOTPLUG_INT_EN (1 << 26) 861#define SDVOB_HOTPLUG_INT_EN (1 << 26)
845#define SDVOC_HOTPLUG_INT_EN (1 << 25) 862#define SDVOC_HOTPLUG_INT_EN (1 << 25)
846#define TV_HOTPLUG_INT_EN (1 << 18) 863#define TV_HOTPLUG_INT_EN (1 << 18)
847#define CRT_EOS_INT_EN (1 << 10)
848#define CRT_HOTPLUG_INT_EN (1 << 9) 864#define CRT_HOTPLUG_INT_EN (1 << 9)
849#define CRT_HOTPLUG_FORCE_DETECT (1 << 3) 865#define CRT_HOTPLUG_FORCE_DETECT (1 << 3)
850#define CRT_HOTPLUG_ACTIVATION_PERIOD_32 (0 << 8) 866#define CRT_HOTPLUG_ACTIVATION_PERIOD_32 (0 << 8)
@@ -868,7 +884,6 @@
868 HDMID_HOTPLUG_INT_EN | \ 884 HDMID_HOTPLUG_INT_EN | \
869 SDVOB_HOTPLUG_INT_EN | \ 885 SDVOB_HOTPLUG_INT_EN | \
870 SDVOC_HOTPLUG_INT_EN | \ 886 SDVOC_HOTPLUG_INT_EN | \
871 TV_HOTPLUG_INT_EN | \
872 CRT_HOTPLUG_INT_EN) 887 CRT_HOTPLUG_INT_EN)
873 888
874 889
@@ -879,7 +894,6 @@
879#define DPC_HOTPLUG_INT_STATUS (1 << 28) 894#define DPC_HOTPLUG_INT_STATUS (1 << 28)
880#define HDMID_HOTPLUG_INT_STATUS (1 << 27) 895#define HDMID_HOTPLUG_INT_STATUS (1 << 27)
881#define DPD_HOTPLUG_INT_STATUS (1 << 27) 896#define DPD_HOTPLUG_INT_STATUS (1 << 27)
882#define CRT_EOS_INT_STATUS (1 << 12)
883#define CRT_HOTPLUG_INT_STATUS (1 << 11) 897#define CRT_HOTPLUG_INT_STATUS (1 << 11)
884#define TV_HOTPLUG_INT_STATUS (1 << 10) 898#define TV_HOTPLUG_INT_STATUS (1 << 10)
885#define CRT_HOTPLUG_MONITOR_MASK (3 << 8) 899#define CRT_HOTPLUG_MONITOR_MASK (3 << 8)
@@ -1620,7 +1634,7 @@
1620#define DP_CLOCK_OUTPUT_ENABLE (1 << 13) 1634#define DP_CLOCK_OUTPUT_ENABLE (1 << 13)
1621 1635
1622#define DP_SCRAMBLING_DISABLE (1 << 12) 1636#define DP_SCRAMBLING_DISABLE (1 << 12)
1623#define DP_SCRAMBLING_DISABLE_IGDNG (1 << 7) 1637#define DP_SCRAMBLING_DISABLE_IRONLAKE (1 << 7)
1624 1638
1625/** limit RGB values to avoid confusing TVs */ 1639/** limit RGB values to avoid confusing TVs */
1626#define DP_COLOR_RANGE_16_235 (1 << 8) 1640#define DP_COLOR_RANGE_16_235 (1 << 8)
@@ -1808,7 +1822,7 @@
1808#define DSPFW3 0x7003c 1822#define DSPFW3 0x7003c
1809#define DSPFW_HPLL_SR_EN (1<<31) 1823#define DSPFW_HPLL_SR_EN (1<<31)
1810#define DSPFW_CURSOR_SR_SHIFT 24 1824#define DSPFW_CURSOR_SR_SHIFT 24
1811#define IGD_SELF_REFRESH_EN (1<<30) 1825#define PINEVIEW_SELF_REFRESH_EN (1<<30)
1812 1826
1813/* FIFO watermark sizes etc */ 1827/* FIFO watermark sizes etc */
1814#define G4X_FIFO_LINE_SIZE 64 1828#define G4X_FIFO_LINE_SIZE 64
@@ -1824,16 +1838,16 @@
1824#define G4X_MAX_WM 0x3f 1838#define G4X_MAX_WM 0x3f
1825#define I915_MAX_WM 0x3f 1839#define I915_MAX_WM 0x3f
1826 1840
1827#define IGD_DISPLAY_FIFO 512 /* in 64byte unit */ 1841#define PINEVIEW_DISPLAY_FIFO 512 /* in 64byte unit */
1828#define IGD_FIFO_LINE_SIZE 64 1842#define PINEVIEW_FIFO_LINE_SIZE 64
1829#define IGD_MAX_WM 0x1ff 1843#define PINEVIEW_MAX_WM 0x1ff
1830#define IGD_DFT_WM 0x3f 1844#define PINEVIEW_DFT_WM 0x3f
1831#define IGD_DFT_HPLLOFF_WM 0 1845#define PINEVIEW_DFT_HPLLOFF_WM 0
1832#define IGD_GUARD_WM 10 1846#define PINEVIEW_GUARD_WM 10
1833#define IGD_CURSOR_FIFO 64 1847#define PINEVIEW_CURSOR_FIFO 64
1834#define IGD_CURSOR_MAX_WM 0x3f 1848#define PINEVIEW_CURSOR_MAX_WM 0x3f
1835#define IGD_CURSOR_DFT_WM 0 1849#define PINEVIEW_CURSOR_DFT_WM 0
1836#define IGD_CURSOR_GUARD_WM 5 1850#define PINEVIEW_CURSOR_GUARD_WM 5
1837 1851
1838/* 1852/*
1839 * The two pipe frame counter registers are not synchronized, so 1853 * The two pipe frame counter registers are not synchronized, so
@@ -1907,6 +1921,7 @@
1907#define DISPPLANE_16BPP (0x5<<26) 1921#define DISPPLANE_16BPP (0x5<<26)
1908#define DISPPLANE_32BPP_NO_ALPHA (0x6<<26) 1922#define DISPPLANE_32BPP_NO_ALPHA (0x6<<26)
1909#define DISPPLANE_32BPP (0x7<<26) 1923#define DISPPLANE_32BPP (0x7<<26)
1924#define DISPPLANE_32BPP_30BIT_NO_ALPHA (0xa<<26)
1910#define DISPPLANE_STEREO_ENABLE (1<<25) 1925#define DISPPLANE_STEREO_ENABLE (1<<25)
1911#define DISPPLANE_STEREO_DISABLE 0 1926#define DISPPLANE_STEREO_DISABLE 0
1912#define DISPPLANE_SEL_PIPE_MASK (1<<24) 1927#define DISPPLANE_SEL_PIPE_MASK (1<<24)
@@ -1918,7 +1933,7 @@
1918#define DISPPLANE_NO_LINE_DOUBLE 0 1933#define DISPPLANE_NO_LINE_DOUBLE 0
1919#define DISPPLANE_STEREO_POLARITY_FIRST 0 1934#define DISPPLANE_STEREO_POLARITY_FIRST 0
1920#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18) 1935#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
1921#define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* IGDNG */ 1936#define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* Ironlake */
1922#define DISPPLANE_TILED (1<<10) 1937#define DISPPLANE_TILED (1<<10)
1923#define DSPAADDR 0x70184 1938#define DSPAADDR 0x70184
1924#define DSPASTRIDE 0x70188 1939#define DSPASTRIDE 0x70188
@@ -1971,7 +1986,7 @@
1971# define VGA_2X_MODE (1 << 30) 1986# define VGA_2X_MODE (1 << 30)
1972# define VGA_PIPE_B_SELECT (1 << 29) 1987# define VGA_PIPE_B_SELECT (1 << 29)
1973 1988
1974/* IGDNG */ 1989/* Ironlake */
1975 1990
1976#define CPU_VGACNTRL 0x41000 1991#define CPU_VGACNTRL 0x41000
1977 1992
@@ -2117,6 +2132,7 @@
2117#define SDE_PORTC_HOTPLUG (1 << 9) 2132#define SDE_PORTC_HOTPLUG (1 << 9)
2118#define SDE_PORTB_HOTPLUG (1 << 8) 2133#define SDE_PORTB_HOTPLUG (1 << 8)
2119#define SDE_SDVOB_HOTPLUG (1 << 6) 2134#define SDE_SDVOB_HOTPLUG (1 << 6)
2135#define SDE_HOTPLUG_MASK (0xf << 8)
2120 2136
2121#define SDEISR 0xc4000 2137#define SDEISR 0xc4000
2122#define SDEIMR 0xc4004 2138#define SDEIMR 0xc4004
@@ -2157,6 +2173,13 @@
2157#define PCH_GPIOE 0xc5020 2173#define PCH_GPIOE 0xc5020
2158#define PCH_GPIOF 0xc5024 2174#define PCH_GPIOF 0xc5024
2159 2175
2176#define PCH_GMBUS0 0xc5100
2177#define PCH_GMBUS1 0xc5104
2178#define PCH_GMBUS2 0xc5108
2179#define PCH_GMBUS3 0xc510c
2180#define PCH_GMBUS4 0xc5110
2181#define PCH_GMBUS5 0xc5120
2182
2160#define PCH_DPLL_A 0xc6014 2183#define PCH_DPLL_A 0xc6014
2161#define PCH_DPLL_B 0xc6018 2184#define PCH_DPLL_B 0xc6018
2162 2185
@@ -2292,7 +2315,7 @@
2292#define FDI_DP_PORT_WIDTH_X3 (2<<19) 2315#define FDI_DP_PORT_WIDTH_X3 (2<<19)
2293#define FDI_DP_PORT_WIDTH_X4 (3<<19) 2316#define FDI_DP_PORT_WIDTH_X4 (3<<19)
2294#define FDI_TX_ENHANCE_FRAME_ENABLE (1<<18) 2317#define FDI_TX_ENHANCE_FRAME_ENABLE (1<<18)
2295/* IGDNG: hardwired to 1 */ 2318/* Ironlake: hardwired to 1 */
2296#define FDI_TX_PLL_ENABLE (1<<14) 2319#define FDI_TX_PLL_ENABLE (1<<14)
2297/* both Tx and Rx */ 2320/* both Tx and Rx */
2298#define FDI_SCRAMBLING_ENABLE (0<<7) 2321#define FDI_SCRAMBLING_ENABLE (0<<7)
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 6eec8171a44e..d5ebb00a9d49 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -27,14 +27,14 @@
27#include "drmP.h" 27#include "drmP.h"
28#include "drm.h" 28#include "drm.h"
29#include "i915_drm.h" 29#include "i915_drm.h"
30#include "i915_drv.h" 30#include "intel_drv.h"
31 31
32static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe) 32static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
33{ 33{
34 struct drm_i915_private *dev_priv = dev->dev_private; 34 struct drm_i915_private *dev_priv = dev->dev_private;
35 u32 dpll_reg; 35 u32 dpll_reg;
36 36
37 if (IS_IGDNG(dev)) { 37 if (IS_IRONLAKE(dev)) {
38 dpll_reg = (pipe == PIPE_A) ? PCH_DPLL_A: PCH_DPLL_B; 38 dpll_reg = (pipe == PIPE_A) ? PCH_DPLL_A: PCH_DPLL_B;
39 } else { 39 } else {
40 dpll_reg = (pipe == PIPE_A) ? DPLL_A: DPLL_B; 40 dpll_reg = (pipe == PIPE_A) ? DPLL_A: DPLL_B;
@@ -53,7 +53,7 @@ static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
53 if (!i915_pipe_enabled(dev, pipe)) 53 if (!i915_pipe_enabled(dev, pipe))
54 return; 54 return;
55 55
56 if (IS_IGDNG(dev)) 56 if (IS_IRONLAKE(dev))
57 reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B; 57 reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B;
58 58
59 if (pipe == PIPE_A) 59 if (pipe == PIPE_A)
@@ -75,7 +75,7 @@ static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
75 if (!i915_pipe_enabled(dev, pipe)) 75 if (!i915_pipe_enabled(dev, pipe))
76 return; 76 return;
77 77
78 if (IS_IGDNG(dev)) 78 if (IS_IRONLAKE(dev))
79 reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B; 79 reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B;
80 80
81 if (pipe == PIPE_A) 81 if (pipe == PIPE_A)
@@ -239,7 +239,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
239 if (drm_core_check_feature(dev, DRIVER_MODESET)) 239 if (drm_core_check_feature(dev, DRIVER_MODESET))
240 return; 240 return;
241 241
242 if (IS_IGDNG(dev)) { 242 if (IS_IRONLAKE(dev)) {
243 dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL); 243 dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
244 dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL); 244 dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
245 } 245 }
@@ -247,7 +247,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
247 /* Pipe & plane A info */ 247 /* Pipe & plane A info */
248 dev_priv->savePIPEACONF = I915_READ(PIPEACONF); 248 dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
249 dev_priv->savePIPEASRC = I915_READ(PIPEASRC); 249 dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
250 if (IS_IGDNG(dev)) { 250 if (IS_IRONLAKE(dev)) {
251 dev_priv->saveFPA0 = I915_READ(PCH_FPA0); 251 dev_priv->saveFPA0 = I915_READ(PCH_FPA0);
252 dev_priv->saveFPA1 = I915_READ(PCH_FPA1); 252 dev_priv->saveFPA1 = I915_READ(PCH_FPA1);
253 dev_priv->saveDPLL_A = I915_READ(PCH_DPLL_A); 253 dev_priv->saveDPLL_A = I915_READ(PCH_DPLL_A);
@@ -256,7 +256,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
256 dev_priv->saveFPA1 = I915_READ(FPA1); 256 dev_priv->saveFPA1 = I915_READ(FPA1);
257 dev_priv->saveDPLL_A = I915_READ(DPLL_A); 257 dev_priv->saveDPLL_A = I915_READ(DPLL_A);
258 } 258 }
259 if (IS_I965G(dev) && !IS_IGDNG(dev)) 259 if (IS_I965G(dev) && !IS_IRONLAKE(dev))
260 dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD); 260 dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
261 dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A); 261 dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
262 dev_priv->saveHBLANK_A = I915_READ(HBLANK_A); 262 dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
@@ -264,10 +264,10 @@ static void i915_save_modeset_reg(struct drm_device *dev)
264 dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A); 264 dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A);
265 dev_priv->saveVBLANK_A = I915_READ(VBLANK_A); 265 dev_priv->saveVBLANK_A = I915_READ(VBLANK_A);
266 dev_priv->saveVSYNC_A = I915_READ(VSYNC_A); 266 dev_priv->saveVSYNC_A = I915_READ(VSYNC_A);
267 if (!IS_IGDNG(dev)) 267 if (!IS_IRONLAKE(dev))
268 dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A); 268 dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
269 269
270 if (IS_IGDNG(dev)) { 270 if (IS_IRONLAKE(dev)) {
271 dev_priv->savePIPEA_DATA_M1 = I915_READ(PIPEA_DATA_M1); 271 dev_priv->savePIPEA_DATA_M1 = I915_READ(PIPEA_DATA_M1);
272 dev_priv->savePIPEA_DATA_N1 = I915_READ(PIPEA_DATA_N1); 272 dev_priv->savePIPEA_DATA_N1 = I915_READ(PIPEA_DATA_N1);
273 dev_priv->savePIPEA_LINK_M1 = I915_READ(PIPEA_LINK_M1); 273 dev_priv->savePIPEA_LINK_M1 = I915_READ(PIPEA_LINK_M1);
@@ -304,7 +304,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
304 /* Pipe & plane B info */ 304 /* Pipe & plane B info */
305 dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF); 305 dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF);
306 dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC); 306 dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC);
307 if (IS_IGDNG(dev)) { 307 if (IS_IRONLAKE(dev)) {
308 dev_priv->saveFPB0 = I915_READ(PCH_FPB0); 308 dev_priv->saveFPB0 = I915_READ(PCH_FPB0);
309 dev_priv->saveFPB1 = I915_READ(PCH_FPB1); 309 dev_priv->saveFPB1 = I915_READ(PCH_FPB1);
310 dev_priv->saveDPLL_B = I915_READ(PCH_DPLL_B); 310 dev_priv->saveDPLL_B = I915_READ(PCH_DPLL_B);
@@ -313,7 +313,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
313 dev_priv->saveFPB1 = I915_READ(FPB1); 313 dev_priv->saveFPB1 = I915_READ(FPB1);
314 dev_priv->saveDPLL_B = I915_READ(DPLL_B); 314 dev_priv->saveDPLL_B = I915_READ(DPLL_B);
315 } 315 }
316 if (IS_I965G(dev) && !IS_IGDNG(dev)) 316 if (IS_I965G(dev) && !IS_IRONLAKE(dev))
317 dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD); 317 dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
318 dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B); 318 dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
319 dev_priv->saveHBLANK_B = I915_READ(HBLANK_B); 319 dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
@@ -321,10 +321,10 @@ static void i915_save_modeset_reg(struct drm_device *dev)
321 dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B); 321 dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B);
322 dev_priv->saveVBLANK_B = I915_READ(VBLANK_B); 322 dev_priv->saveVBLANK_B = I915_READ(VBLANK_B);
323 dev_priv->saveVSYNC_B = I915_READ(VSYNC_B); 323 dev_priv->saveVSYNC_B = I915_READ(VSYNC_B);
324 if (!IS_IGDNG(dev)) 324 if (!IS_IRONLAKE(dev))
325 dev_priv->saveBCLRPAT_B = I915_READ(BCLRPAT_B); 325 dev_priv->saveBCLRPAT_B = I915_READ(BCLRPAT_B);
326 326
327 if (IS_IGDNG(dev)) { 327 if (IS_IRONLAKE(dev)) {
328 dev_priv->savePIPEB_DATA_M1 = I915_READ(PIPEB_DATA_M1); 328 dev_priv->savePIPEB_DATA_M1 = I915_READ(PIPEB_DATA_M1);
329 dev_priv->savePIPEB_DATA_N1 = I915_READ(PIPEB_DATA_N1); 329 dev_priv->savePIPEB_DATA_N1 = I915_READ(PIPEB_DATA_N1);
330 dev_priv->savePIPEB_LINK_M1 = I915_READ(PIPEB_LINK_M1); 330 dev_priv->savePIPEB_LINK_M1 = I915_READ(PIPEB_LINK_M1);
@@ -369,7 +369,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
369 if (drm_core_check_feature(dev, DRIVER_MODESET)) 369 if (drm_core_check_feature(dev, DRIVER_MODESET))
370 return; 370 return;
371 371
372 if (IS_IGDNG(dev)) { 372 if (IS_IRONLAKE(dev)) {
373 dpll_a_reg = PCH_DPLL_A; 373 dpll_a_reg = PCH_DPLL_A;
374 dpll_b_reg = PCH_DPLL_B; 374 dpll_b_reg = PCH_DPLL_B;
375 fpa0_reg = PCH_FPA0; 375 fpa0_reg = PCH_FPA0;
@@ -385,7 +385,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
385 fpb1_reg = FPB1; 385 fpb1_reg = FPB1;
386 } 386 }
387 387
388 if (IS_IGDNG(dev)) { 388 if (IS_IRONLAKE(dev)) {
389 I915_WRITE(PCH_DREF_CONTROL, dev_priv->savePCH_DREF_CONTROL); 389 I915_WRITE(PCH_DREF_CONTROL, dev_priv->savePCH_DREF_CONTROL);
390 I915_WRITE(DISP_ARB_CTL, dev_priv->saveDISP_ARB_CTL); 390 I915_WRITE(DISP_ARB_CTL, dev_priv->saveDISP_ARB_CTL);
391 } 391 }
@@ -402,7 +402,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
402 /* Actually enable it */ 402 /* Actually enable it */
403 I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A); 403 I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A);
404 DRM_UDELAY(150); 404 DRM_UDELAY(150);
405 if (IS_I965G(dev) && !IS_IGDNG(dev)) 405 if (IS_I965G(dev) && !IS_IRONLAKE(dev))
406 I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD); 406 I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
407 DRM_UDELAY(150); 407 DRM_UDELAY(150);
408 408
@@ -413,10 +413,10 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
413 I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A); 413 I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A);
414 I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A); 414 I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);
415 I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A); 415 I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);
416 if (!IS_IGDNG(dev)) 416 if (!IS_IRONLAKE(dev))
417 I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A); 417 I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
418 418
419 if (IS_IGDNG(dev)) { 419 if (IS_IRONLAKE(dev)) {
420 I915_WRITE(PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1); 420 I915_WRITE(PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1);
421 I915_WRITE(PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1); 421 I915_WRITE(PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1);
422 I915_WRITE(PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1); 422 I915_WRITE(PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1);
@@ -467,7 +467,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
467 /* Actually enable it */ 467 /* Actually enable it */
468 I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B); 468 I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B);
469 DRM_UDELAY(150); 469 DRM_UDELAY(150);
470 if (IS_I965G(dev) && !IS_IGDNG(dev)) 470 if (IS_I965G(dev) && !IS_IRONLAKE(dev))
471 I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD); 471 I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
472 DRM_UDELAY(150); 472 DRM_UDELAY(150);
473 473
@@ -478,10 +478,10 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
478 I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B); 478 I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B);
479 I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B); 479 I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B);
480 I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B); 480 I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B);
481 if (!IS_IGDNG(dev)) 481 if (!IS_IRONLAKE(dev))
482 I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B); 482 I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
483 483
484 if (IS_IGDNG(dev)) { 484 if (IS_IRONLAKE(dev)) {
485 I915_WRITE(PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1); 485 I915_WRITE(PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1);
486 I915_WRITE(PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1); 486 I915_WRITE(PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1);
487 I915_WRITE(PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1); 487 I915_WRITE(PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1);
@@ -546,14 +546,14 @@ void i915_save_display(struct drm_device *dev)
546 dev_priv->saveCURSIZE = I915_READ(CURSIZE); 546 dev_priv->saveCURSIZE = I915_READ(CURSIZE);
547 547
548 /* CRT state */ 548 /* CRT state */
549 if (IS_IGDNG(dev)) { 549 if (IS_IRONLAKE(dev)) {
550 dev_priv->saveADPA = I915_READ(PCH_ADPA); 550 dev_priv->saveADPA = I915_READ(PCH_ADPA);
551 } else { 551 } else {
552 dev_priv->saveADPA = I915_READ(ADPA); 552 dev_priv->saveADPA = I915_READ(ADPA);
553 } 553 }
554 554
555 /* LVDS state */ 555 /* LVDS state */
556 if (IS_IGDNG(dev)) { 556 if (IS_IRONLAKE(dev)) {
557 dev_priv->savePP_CONTROL = I915_READ(PCH_PP_CONTROL); 557 dev_priv->savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
558 dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1); 558 dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
559 dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2); 559 dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
@@ -571,10 +571,10 @@ void i915_save_display(struct drm_device *dev)
571 dev_priv->saveLVDS = I915_READ(LVDS); 571 dev_priv->saveLVDS = I915_READ(LVDS);
572 } 572 }
573 573
574 if (!IS_I830(dev) && !IS_845G(dev) && !IS_IGDNG(dev)) 574 if (!IS_I830(dev) && !IS_845G(dev) && !IS_IRONLAKE(dev))
575 dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL); 575 dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
576 576
577 if (IS_IGDNG(dev)) { 577 if (IS_IRONLAKE(dev)) {
578 dev_priv->savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS); 578 dev_priv->savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS);
579 dev_priv->savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS); 579 dev_priv->savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS);
580 dev_priv->savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR); 580 dev_priv->savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR);
@@ -614,7 +614,7 @@ void i915_save_display(struct drm_device *dev)
614 dev_priv->saveVGA0 = I915_READ(VGA0); 614 dev_priv->saveVGA0 = I915_READ(VGA0);
615 dev_priv->saveVGA1 = I915_READ(VGA1); 615 dev_priv->saveVGA1 = I915_READ(VGA1);
616 dev_priv->saveVGA_PD = I915_READ(VGA_PD); 616 dev_priv->saveVGA_PD = I915_READ(VGA_PD);
617 if (IS_IGDNG(dev)) 617 if (IS_IRONLAKE(dev))
618 dev_priv->saveVGACNTRL = I915_READ(CPU_VGACNTRL); 618 dev_priv->saveVGACNTRL = I915_READ(CPU_VGACNTRL);
619 else 619 else
620 dev_priv->saveVGACNTRL = I915_READ(VGACNTRL); 620 dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
@@ -656,24 +656,24 @@ void i915_restore_display(struct drm_device *dev)
656 I915_WRITE(CURSIZE, dev_priv->saveCURSIZE); 656 I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
657 657
658 /* CRT state */ 658 /* CRT state */
659 if (IS_IGDNG(dev)) 659 if (IS_IRONLAKE(dev))
660 I915_WRITE(PCH_ADPA, dev_priv->saveADPA); 660 I915_WRITE(PCH_ADPA, dev_priv->saveADPA);
661 else 661 else
662 I915_WRITE(ADPA, dev_priv->saveADPA); 662 I915_WRITE(ADPA, dev_priv->saveADPA);
663 663
664 /* LVDS state */ 664 /* LVDS state */
665 if (IS_I965G(dev) && !IS_IGDNG(dev)) 665 if (IS_I965G(dev) && !IS_IRONLAKE(dev))
666 I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2); 666 I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
667 667
668 if (IS_IGDNG(dev)) { 668 if (IS_IRONLAKE(dev)) {
669 I915_WRITE(PCH_LVDS, dev_priv->saveLVDS); 669 I915_WRITE(PCH_LVDS, dev_priv->saveLVDS);
670 } else if (IS_MOBILE(dev) && !IS_I830(dev)) 670 } else if (IS_MOBILE(dev) && !IS_I830(dev))
671 I915_WRITE(LVDS, dev_priv->saveLVDS); 671 I915_WRITE(LVDS, dev_priv->saveLVDS);
672 672
673 if (!IS_I830(dev) && !IS_845G(dev) && !IS_IGDNG(dev)) 673 if (!IS_I830(dev) && !IS_845G(dev) && !IS_IRONLAKE(dev))
674 I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL); 674 I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL);
675 675
676 if (IS_IGDNG(dev)) { 676 if (IS_IRONLAKE(dev)) {
677 I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->saveBLC_PWM_CTL); 677 I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->saveBLC_PWM_CTL);
678 I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->saveBLC_PWM_CTL2); 678 I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->saveBLC_PWM_CTL2);
679 I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL); 679 I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL);
@@ -713,7 +713,7 @@ void i915_restore_display(struct drm_device *dev)
713 } 713 }
714 714
715 /* VGA state */ 715 /* VGA state */
716 if (IS_IGDNG(dev)) 716 if (IS_IRONLAKE(dev))
717 I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL); 717 I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL);
718 else 718 else
719 I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL); 719 I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
@@ -733,8 +733,10 @@ int i915_save_state(struct drm_device *dev)
733 pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB); 733 pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
734 734
735 /* Render Standby */ 735 /* Render Standby */
736 if (IS_I965G(dev) && IS_MOBILE(dev)) 736 if (I915_HAS_RC6(dev)) {
737 dev_priv->saveRENDERSTANDBY = I915_READ(MCHBAR_RENDER_STANDBY); 737 dev_priv->saveRENDERSTANDBY = I915_READ(MCHBAR_RENDER_STANDBY);
738 dev_priv->savePWRCTXA = I915_READ(PWRCTXA);
739 }
738 740
739 /* Hardware status page */ 741 /* Hardware status page */
740 dev_priv->saveHWS = I915_READ(HWS_PGA); 742 dev_priv->saveHWS = I915_READ(HWS_PGA);
@@ -742,7 +744,7 @@ int i915_save_state(struct drm_device *dev)
742 i915_save_display(dev); 744 i915_save_display(dev);
743 745
744 /* Interrupt state */ 746 /* Interrupt state */
745 if (IS_IGDNG(dev)) { 747 if (IS_IRONLAKE(dev)) {
746 dev_priv->saveDEIER = I915_READ(DEIER); 748 dev_priv->saveDEIER = I915_READ(DEIER);
747 dev_priv->saveDEIMR = I915_READ(DEIMR); 749 dev_priv->saveDEIMR = I915_READ(DEIMR);
748 dev_priv->saveGTIER = I915_READ(GTIER); 750 dev_priv->saveGTIER = I915_READ(GTIER);
@@ -754,10 +756,6 @@ int i915_save_state(struct drm_device *dev)
754 dev_priv->saveIMR = I915_READ(IMR); 756 dev_priv->saveIMR = I915_READ(IMR);
755 } 757 }
756 758
757 /* Clock gating state */
758 dev_priv->saveD_STATE = I915_READ(D_STATE);
759 dev_priv->saveDSPCLK_GATE_D = I915_READ(DSPCLK_GATE_D); /* Not sure about this */
760
761 /* Cache mode state */ 759 /* Cache mode state */
762 dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); 760 dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
763 761
@@ -796,8 +794,10 @@ int i915_restore_state(struct drm_device *dev)
796 pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB); 794 pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
797 795
798 /* Render Standby */ 796 /* Render Standby */
799 if (IS_I965G(dev) && IS_MOBILE(dev)) 797 if (I915_HAS_RC6(dev)) {
800 I915_WRITE(MCHBAR_RENDER_STANDBY, dev_priv->saveRENDERSTANDBY); 798 I915_WRITE(MCHBAR_RENDER_STANDBY, dev_priv->saveRENDERSTANDBY);
799 I915_WRITE(PWRCTXA, dev_priv->savePWRCTXA);
800 }
801 801
802 /* Hardware status page */ 802 /* Hardware status page */
803 I915_WRITE(HWS_PGA, dev_priv->saveHWS); 803 I915_WRITE(HWS_PGA, dev_priv->saveHWS);
@@ -817,7 +817,7 @@ int i915_restore_state(struct drm_device *dev)
817 i915_restore_display(dev); 817 i915_restore_display(dev);
818 818
819 /* Interrupt state */ 819 /* Interrupt state */
820 if (IS_IGDNG(dev)) { 820 if (IS_IRONLAKE(dev)) {
821 I915_WRITE(DEIER, dev_priv->saveDEIER); 821 I915_WRITE(DEIER, dev_priv->saveDEIER);
822 I915_WRITE(DEIMR, dev_priv->saveDEIMR); 822 I915_WRITE(DEIMR, dev_priv->saveDEIMR);
823 I915_WRITE(GTIER, dev_priv->saveGTIER); 823 I915_WRITE(GTIER, dev_priv->saveGTIER);
@@ -830,8 +830,7 @@ int i915_restore_state(struct drm_device *dev)
830 } 830 }
831 831
832 /* Clock gating state */ 832 /* Clock gating state */
833 I915_WRITE (D_STATE, dev_priv->saveD_STATE); 833 intel_init_clock_gating(dev);
834 I915_WRITE (DSPCLK_GATE_D, dev_priv->saveDSPCLK_GATE_D);
835 834
836 /* Cache mode state */ 835 /* Cache mode state */
837 I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000); 836 I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
@@ -846,6 +845,9 @@ int i915_restore_state(struct drm_device *dev)
846 for (i = 0; i < 3; i++) 845 for (i = 0; i < 3; i++)
847 I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]); 846 I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
848 847
848 /* I2C state */
849 intel_i2c_reset_gmbus(dev);
850
849 return 0; 851 return 0;
850} 852}
851 853
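
The i915_suspend.c changes above share one idea: state that can be recomputed (clock gating) or must be re-armed (the GMBUS I2C controller) is re-initialized on resume instead of being restored from a snapshot, while genuinely opaque state (cache mode, SWF scratch) is still saved verbatim. A minimal user-space sketch of that save/reinit split, with stubbed register accessors standing in for I915_READ/I915_WRITE; all names here are illustrative, not the driver's API:

#include <stdint.h>
#include <stdio.h>

enum { REG_CACHE_MODE, REG_CLOCK_GATE, REG_GMBUS, REG_SWF, REG_COUNT };

static uint32_t regs[REG_COUNT];              /* fake MMIO space */

static uint32_t reg_read(int r)              { return regs[r]; }
static void     reg_write(int r, uint32_t v) { regs[r] = v; }

struct saved_state { uint32_t cache_mode, swf; };

static void save_state(struct saved_state *s)
{
        /* Snapshot only registers whose exact value must survive. */
        s->cache_mode = reg_read(REG_CACHE_MODE);
        s->swf        = reg_read(REG_SWF);
        /* Clock gating is deliberately not saved: it is derived state. */
}

static void init_clock_gating(void)
{
        /* Recompute the correct gating setup for this chipset instead
         * of trusting whatever was latched before suspend. */
        reg_write(REG_CLOCK_GATE, 0x1);
}

static void reset_gmbus(void)
{
        /* Force the I2C/GMBUS controller back to a known idle state. */
        reg_write(REG_GMBUS, 0x0);
}

static void restore_state(const struct saved_state *s)
{
        reg_write(REG_CACHE_MODE, s->cache_mode | 0xffff0000);
        init_clock_gating();            /* re-derive, don't restore */
        reg_write(REG_SWF, s->swf);
        reset_gmbus();                  /* reset, don't restore */
}

int main(void)
{
        struct saved_state s;

        save_state(&s);
        regs[REG_CLOCK_GATE] = 0xdead;  /* scribbled over during suspend */
        restore_state(&s);
        printf("clock gating after resume: 0x%x\n",
               (unsigned)reg_read(REG_CLOCK_GATE));
        return 0;
}

Re-deriving instead of restoring avoids resurrecting a gating configuration that firmware or a BIOS resume path may have invalidated.
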
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 96cd256e60e6..f27567747580 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -114,6 +114,8 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
114 struct lvds_dvo_timing *dvo_timing; 114 struct lvds_dvo_timing *dvo_timing;
115 struct drm_display_mode *panel_fixed_mode; 115 struct drm_display_mode *panel_fixed_mode;
116 int lfp_data_size, dvo_timing_offset; 116 int lfp_data_size, dvo_timing_offset;
117 int i, temp_downclock;
118 struct drm_display_mode *temp_mode;
117 119
118 /* Defaults if we can't find VBT info */ 120 /* Defaults if we can't find VBT info */
119 dev_priv->lvds_dither = 0; 121 dev_priv->lvds_dither = 0;
@@ -159,9 +161,49 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
159 161
160 dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode; 162 dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode;
161 163
162 DRM_DEBUG("Found panel mode in BIOS VBT tables:\n"); 164 DRM_DEBUG_KMS("Found panel mode in BIOS VBT tables:\n");
163 drm_mode_debug_printmodeline(panel_fixed_mode); 165 drm_mode_debug_printmodeline(panel_fixed_mode);
164 166
167 temp_mode = kzalloc(sizeof(*temp_mode), GFP_KERNEL);
168 temp_downclock = panel_fixed_mode->clock;
169 /*
 170 * Enumerate the LVDS panel timing entries in the VBT to check
 171 * whether an LVDS downclock is available.
172 */
173 for (i = 0; i < 16; i++) {
174 entry = (struct bdb_lvds_lfp_data_entry *)
175 ((uint8_t *)lvds_lfp_data->data + (lfp_data_size * i));
176 dvo_timing = (struct lvds_dvo_timing *)
177 ((unsigned char *)entry + dvo_timing_offset);
178
179 fill_detail_timing_data(temp_mode, dvo_timing);
180
181 if (temp_mode->hdisplay == panel_fixed_mode->hdisplay &&
182 temp_mode->hsync_start == panel_fixed_mode->hsync_start &&
183 temp_mode->hsync_end == panel_fixed_mode->hsync_end &&
184 temp_mode->htotal == panel_fixed_mode->htotal &&
185 temp_mode->vdisplay == panel_fixed_mode->vdisplay &&
186 temp_mode->vsync_start == panel_fixed_mode->vsync_start &&
187 temp_mode->vsync_end == panel_fixed_mode->vsync_end &&
188 temp_mode->vtotal == panel_fixed_mode->vtotal &&
189 temp_mode->clock < temp_downclock) {
190 /*
 191 * A downclock is already found, but keep
 192 * looking for an even lower one.
193 */
194 temp_downclock = temp_mode->clock;
195 }
 196 /* clear it before checking the next entry */
197 memset(temp_mode, 0, sizeof(*temp_mode));
198 }
199 kfree(temp_mode);
200 if (temp_downclock < panel_fixed_mode->clock) {
201 dev_priv->lvds_downclock_avail = 1;
202 dev_priv->lvds_downclock = temp_downclock;
203 DRM_DEBUG_KMS("LVDS downclock is found in VBT. ",
204 "Normal Clock %dKHz, downclock %dKHz\n",
205 temp_downclock, panel_fixed_mode->clock);
206 }
165 return; 207 return;
166} 208}
167 209
@@ -217,7 +259,7 @@ parse_general_features(struct drm_i915_private *dev_priv,
217 if (IS_I85X(dev_priv->dev)) 259 if (IS_I85X(dev_priv->dev))
218 dev_priv->lvds_ssc_freq = 260 dev_priv->lvds_ssc_freq =
219 general->ssc_freq ? 66 : 48; 261 general->ssc_freq ? 66 : 48;
220 else if (IS_IGDNG(dev_priv->dev)) 262 else if (IS_IRONLAKE(dev_priv->dev))
221 dev_priv->lvds_ssc_freq = 263 dev_priv->lvds_ssc_freq =
222 general->ssc_freq ? 100 : 120; 264 general->ssc_freq ? 100 : 120;
223 else 265 else
@@ -241,22 +283,18 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
241 GPIOF, 283 GPIOF,
242 }; 284 };
243 285
244 /* Set sensible defaults in case we can't find the general block
245 or it is the wrong chipset */
246 dev_priv->crt_ddc_bus = -1;
247
248 general = find_section(bdb, BDB_GENERAL_DEFINITIONS); 286 general = find_section(bdb, BDB_GENERAL_DEFINITIONS);
249 if (general) { 287 if (general) {
250 u16 block_size = get_blocksize(general); 288 u16 block_size = get_blocksize(general);
251 if (block_size >= sizeof(*general)) { 289 if (block_size >= sizeof(*general)) {
252 int bus_pin = general->crt_ddc_gmbus_pin; 290 int bus_pin = general->crt_ddc_gmbus_pin;
253 DRM_DEBUG("crt_ddc_bus_pin: %d\n", bus_pin); 291 DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin);
254 if ((bus_pin >= 1) && (bus_pin <= 6)) { 292 if ((bus_pin >= 1) && (bus_pin <= 6)) {
255 dev_priv->crt_ddc_bus = 293 dev_priv->crt_ddc_bus =
256 crt_bus_map_table[bus_pin-1]; 294 crt_bus_map_table[bus_pin-1];
257 } 295 }
258 } else { 296 } else {
259 DRM_DEBUG("BDB_GD too small (%d). Invalid.\n", 297 DRM_DEBUG_KMS("BDB_GD too small (%d). Invalid.\n",
260 block_size); 298 block_size);
261 } 299 }
262 } 300 }
@@ -274,7 +312,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
274 312
275 p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS); 313 p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
276 if (!p_defs) { 314 if (!p_defs) {
277 DRM_DEBUG("No general definition block is found\n"); 315 DRM_DEBUG_KMS("No general definition block is found\n");
278 return; 316 return;
279 } 317 }
280 /* judge whether the size of child device meets the requirements. 318 /* judge whether the size of child device meets the requirements.
@@ -284,7 +322,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
284 */ 322 */
285 if (p_defs->child_dev_size != sizeof(*p_child)) { 323 if (p_defs->child_dev_size != sizeof(*p_child)) {
286 /* different child dev size . Ignore it */ 324 /* different child dev size . Ignore it */
287 DRM_DEBUG("different child size is found. Invalid.\n"); 325 DRM_DEBUG_KMS("different child size is found. Invalid.\n");
288 return; 326 return;
289 } 327 }
290 /* get the block size of general definitions */ 328 /* get the block size of general definitions */
@@ -310,11 +348,11 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
310 if (p_child->dvo_port != DEVICE_PORT_DVOB && 348 if (p_child->dvo_port != DEVICE_PORT_DVOB &&
311 p_child->dvo_port != DEVICE_PORT_DVOC) { 349 p_child->dvo_port != DEVICE_PORT_DVOC) {
312 /* skip the incorrect SDVO port */ 350 /* skip the incorrect SDVO port */
313 DRM_DEBUG("Incorrect SDVO port. Skip it \n"); 351 DRM_DEBUG_KMS("Incorrect SDVO port. Skip it \n");
314 continue; 352 continue;
315 } 353 }
316 DRM_DEBUG("the SDVO device with slave addr %2x is found on " 354 DRM_DEBUG_KMS("the SDVO device with slave addr %2x is found on"
317 "%s port\n", 355 " %s port\n",
318 p_child->slave_addr, 356 p_child->slave_addr,
319 (p_child->dvo_port == DEVICE_PORT_DVOB) ? 357 (p_child->dvo_port == DEVICE_PORT_DVOB) ?
320 "SDVOB" : "SDVOC"); 358 "SDVOB" : "SDVOC");
@@ -325,21 +363,21 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
325 p_mapping->dvo_wiring = p_child->dvo_wiring; 363 p_mapping->dvo_wiring = p_child->dvo_wiring;
326 p_mapping->initialized = 1; 364 p_mapping->initialized = 1;
327 } else { 365 } else {
328 DRM_DEBUG("Maybe one SDVO port is shared by " 366 DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
329 "two SDVO device.\n"); 367 "two SDVO device.\n");
330 } 368 }
331 if (p_child->slave2_addr) { 369 if (p_child->slave2_addr) {
332 /* Maybe this is a SDVO device with multiple inputs */ 370 /* Maybe this is a SDVO device with multiple inputs */
333 /* And the mapping info is not added */ 371 /* And the mapping info is not added */
334 DRM_DEBUG("there exists the slave2_addr. Maybe this " 372 DRM_DEBUG_KMS("there exists the slave2_addr. Maybe this"
335 "is a SDVO device with multiple inputs.\n"); 373 " is a SDVO device with multiple inputs.\n");
336 } 374 }
337 count++; 375 count++;
338 } 376 }
339 377
340 if (!count) { 378 if (!count) {
341 /* No SDVO device info is found */ 379 /* No SDVO device info is found */
342 DRM_DEBUG("No SDVO device info is found in VBT\n"); 380 DRM_DEBUG_KMS("No SDVO device info is found in VBT\n");
343 } 381 }
344 return; 382 return;
345} 383}
@@ -366,6 +404,70 @@ parse_driver_features(struct drm_i915_private *dev_priv,
366 dev_priv->render_reclock_avail = true; 404 dev_priv->render_reclock_avail = true;
367} 405}
368 406
407static void
408parse_device_mapping(struct drm_i915_private *dev_priv,
409 struct bdb_header *bdb)
410{
411 struct bdb_general_definitions *p_defs;
412 struct child_device_config *p_child, *child_dev_ptr;
413 int i, child_device_num, count;
414 u16 block_size;
415
416 p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
417 if (!p_defs) {
418 DRM_DEBUG_KMS("No general definition block is found\n");
419 return;
420 }
 421 /* Check whether the size of the child device meets the requirements.
 422 * If the child device size obtained from the general definitions block
 423 * differs from sizeof(struct child_device_config), skip parsing the
 424 * device mapping.
425 */
426 if (p_defs->child_dev_size != sizeof(*p_child)) {
 427 /* different child dev size. Ignore it. */
428 DRM_DEBUG_KMS("different child size is found. Invalid.\n");
429 return;
430 }
431 /* get the block size of general definitions */
432 block_size = get_blocksize(p_defs);
433 /* get the number of child device */
434 child_device_num = (block_size - sizeof(*p_defs)) /
435 sizeof(*p_child);
436 count = 0;
437 /* get the number of child device that is present */
438 for (i = 0; i < child_device_num; i++) {
439 p_child = &(p_defs->devices[i]);
440 if (!p_child->device_type) {
441 /* skip the device block if device type is invalid */
442 continue;
443 }
444 count++;
445 }
446 if (!count) {
447 DRM_DEBUG_KMS("no child dev is parsed from VBT \n");
448 return;
449 }
450 dev_priv->child_dev = kzalloc(sizeof(*p_child) * count, GFP_KERNEL);
451 if (!dev_priv->child_dev) {
452 DRM_DEBUG_KMS("No memory space for child device\n");
453 return;
454 }
455
456 dev_priv->child_dev_num = count;
457 count = 0;
458 for (i = 0; i < child_device_num; i++) {
459 p_child = &(p_defs->devices[i]);
460 if (!p_child->device_type) {
461 /* skip the device block if device type is invalid */
462 continue;
463 }
464 child_dev_ptr = dev_priv->child_dev + count;
465 count++;
 466 memcpy(child_dev_ptr, p_child,
 467 sizeof(*p_child));
468 }
469 return;
470}
369/** 471/**
370 * intel_init_bios - initialize VBIOS settings & find VBT 472 * intel_init_bios - initialize VBIOS settings & find VBT
371 * @dev: DRM device 473 * @dev: DRM device
@@ -417,6 +519,7 @@ intel_init_bios(struct drm_device *dev)
417 parse_lfp_panel_data(dev_priv, bdb); 519 parse_lfp_panel_data(dev_priv, bdb);
418 parse_sdvo_panel_data(dev_priv, bdb); 520 parse_sdvo_panel_data(dev_priv, bdb);
419 parse_sdvo_device_mapping(dev_priv, bdb); 521 parse_sdvo_device_mapping(dev_priv, bdb);
522 parse_device_mapping(dev_priv, bdb);
420 parse_driver_features(dev_priv, bdb); 523 parse_driver_features(dev_priv, bdb);
421 524
422 pci_unmap_rom(pdev, bios); 525 pci_unmap_rom(pdev, bios);
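
parse_device_mapping uses a two-pass pattern: count the valid child-device blocks first, then allocate one array and copy the survivors into it. A user-space sketch of that count-then-copy allocation, with calloc standing in for kzalloc and an illustrative struct layout:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct child_dev {
        unsigned short device_type;       /* 0 means empty/invalid block */
        unsigned char payload[30];
};

/* Pass 1 counts valid entries; pass 2 copies them into one allocation.
 * Returns the array (caller frees) and stores its length in *out_n. */
static struct child_dev *collect_children(const struct child_dev *tbl,
                                          int n, int *out_n)
{
        struct child_dev *out;
        int i, j, count = 0;

        for (i = 0; i < n; i++)
                if (tbl[i].device_type)
                        count++;
        *out_n = count;
        if (!count)
                return NULL;

        out = calloc(count, sizeof(*out));
        if (!out)
                return NULL;

        for (i = 0, j = 0; i < n; i++)
                if (tbl[i].device_type)
                        memcpy(&out[j++], &tbl[i], sizeof(out[0]));
        return out;
}

int main(void)
{
        struct child_dev tbl[3] = { { 0, {0} }, { 0x1022, {0} }, { 0, {0} } };
        int n;
        struct child_dev *kids = collect_children(tbl, 3, &n);

        printf("%d child device(s) copied\n", n);
        free(kids);
        return 0;
}

Counting before allocating keeps the copy in a single contiguous buffer sized exactly for the entries that are actually present.
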
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 0f8e5f69ac7a..425ac9d7f724 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -549,4 +549,21 @@ bool intel_init_bios(struct drm_device *dev);
549#define SWF14_APM_STANDBY 0x1 549#define SWF14_APM_STANDBY 0x1
550#define SWF14_APM_RESTORE 0x0 550#define SWF14_APM_RESTORE 0x0
551 551
552/* Add the device class for LFP, TV, HDMI */
553#define DEVICE_TYPE_INT_LFP 0x1022
554#define DEVICE_TYPE_INT_TV 0x1009
555#define DEVICE_TYPE_HDMI 0x60D2
556#define DEVICE_TYPE_DP 0x68C6
557#define DEVICE_TYPE_eDP 0x78C6
558
559/* define the DVO port for HDMI output type */
560#define DVO_B 1
561#define DVO_C 2
562#define DVO_D 3
563
564/* define the PORT for DP output type */
565#define PORT_IDPB 7
566#define PORT_IDPC 8
567#define PORT_IDPD 9
568
552#endif /* _I830_BIOS_H_ */ 569#endif /* _I830_BIOS_H_ */
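
A child device block can then be classified by comparing its device_type against the new codes; a minimal sketch using the values defined above (the decode function itself is illustrative, not part of the driver):

#include <stdio.h>

#define DEVICE_TYPE_INT_LFP 0x1022
#define DEVICE_TYPE_INT_TV  0x1009
#define DEVICE_TYPE_HDMI    0x60D2
#define DEVICE_TYPE_DP      0x68C6
#define DEVICE_TYPE_eDP     0x78C6

static const char *device_class_name(unsigned short type)
{
        switch (type) {
        case DEVICE_TYPE_INT_LFP: return "integrated LFP";
        case DEVICE_TYPE_INT_TV:  return "integrated TV";
        case DEVICE_TYPE_HDMI:    return "HDMI";
        case DEVICE_TYPE_DP:      return "DisplayPort";
        case DEVICE_TYPE_eDP:     return "embedded DisplayPort";
        default:                  return "unknown";
        }
}

int main(void)
{
        printf("0x78C6 -> %s\n", device_class_name(DEVICE_TYPE_eDP));
        return 0;
}
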
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index e5051446c48e..9f3d3e563414 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -39,7 +39,7 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
39 struct drm_i915_private *dev_priv = dev->dev_private; 39 struct drm_i915_private *dev_priv = dev->dev_private;
40 u32 temp, reg; 40 u32 temp, reg;
41 41
42 if (IS_IGDNG(dev)) 42 if (IS_IRONLAKE(dev))
43 reg = PCH_ADPA; 43 reg = PCH_ADPA;
44 else 44 else
45 reg = ADPA; 45 reg = ADPA;
@@ -64,34 +64,6 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
64 } 64 }
65 65
66 I915_WRITE(reg, temp); 66 I915_WRITE(reg, temp);
67
68 if (IS_IGD(dev)) {
69 if (mode == DRM_MODE_DPMS_OFF) {
70 /* turn off DAC */
71 temp = I915_READ(PORT_HOTPLUG_EN);
72 temp &= ~CRT_EOS_INT_EN;
73 I915_WRITE(PORT_HOTPLUG_EN, temp);
74
75 temp = I915_READ(PORT_HOTPLUG_STAT);
76 if (temp & CRT_EOS_INT_STATUS)
77 I915_WRITE(PORT_HOTPLUG_STAT,
78 CRT_EOS_INT_STATUS);
79 } else {
80 /* turn on DAC. EOS interrupt must be enabled after DAC
81 * is enabled, so it sounds not good to enable it in
82 * i915_driver_irq_postinstall()
83 * wait 12.5ms after DAC is enabled
84 */
85 msleep(13);
86 temp = I915_READ(PORT_HOTPLUG_STAT);
87 if (temp & CRT_EOS_INT_STATUS)
88 I915_WRITE(PORT_HOTPLUG_STAT,
89 CRT_EOS_INT_STATUS);
90 temp = I915_READ(PORT_HOTPLUG_EN);
91 temp |= CRT_EOS_INT_EN;
92 I915_WRITE(PORT_HOTPLUG_EN, temp);
93 }
94 }
95} 67}
96 68
97static int intel_crt_mode_valid(struct drm_connector *connector, 69static int intel_crt_mode_valid(struct drm_connector *connector,
@@ -141,7 +113,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
141 else 113 else
142 dpll_md_reg = DPLL_B_MD; 114 dpll_md_reg = DPLL_B_MD;
143 115
144 if (IS_IGDNG(dev)) 116 if (IS_IRONLAKE(dev))
145 adpa_reg = PCH_ADPA; 117 adpa_reg = PCH_ADPA;
146 else 118 else
147 adpa_reg = ADPA; 119 adpa_reg = ADPA;
@@ -150,7 +122,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
150 * Disable separate mode multiplier used when cloning SDVO to CRT 122 * Disable separate mode multiplier used when cloning SDVO to CRT
151 * XXX this needs to be adjusted when we really are cloning 123 * XXX this needs to be adjusted when we really are cloning
152 */ 124 */
153 if (IS_I965G(dev) && !IS_IGDNG(dev)) { 125 if (IS_I965G(dev) && !IS_IRONLAKE(dev)) {
154 dpll_md = I915_READ(dpll_md_reg); 126 dpll_md = I915_READ(dpll_md_reg);
155 I915_WRITE(dpll_md_reg, 127 I915_WRITE(dpll_md_reg,
156 dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK); 128 dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
@@ -164,18 +136,18 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
164 136
165 if (intel_crtc->pipe == 0) { 137 if (intel_crtc->pipe == 0) {
166 adpa |= ADPA_PIPE_A_SELECT; 138 adpa |= ADPA_PIPE_A_SELECT;
167 if (!IS_IGDNG(dev)) 139 if (!IS_IRONLAKE(dev))
168 I915_WRITE(BCLRPAT_A, 0); 140 I915_WRITE(BCLRPAT_A, 0);
169 } else { 141 } else {
170 adpa |= ADPA_PIPE_B_SELECT; 142 adpa |= ADPA_PIPE_B_SELECT;
171 if (!IS_IGDNG(dev)) 143 if (!IS_IRONLAKE(dev))
172 I915_WRITE(BCLRPAT_B, 0); 144 I915_WRITE(BCLRPAT_B, 0);
173 } 145 }
174 146
175 I915_WRITE(adpa_reg, adpa); 147 I915_WRITE(adpa_reg, adpa);
176} 148}
177 149
178static bool intel_igdng_crt_detect_hotplug(struct drm_connector *connector) 150static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
179{ 151{
180 struct drm_device *dev = connector->dev; 152 struct drm_device *dev = connector->dev;
181 struct drm_i915_private *dev_priv = dev->dev_private; 153 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -194,7 +166,7 @@ static bool intel_igdng_crt_detect_hotplug(struct drm_connector *connector)
194 ADPA_CRT_HOTPLUG_ENABLE | 166 ADPA_CRT_HOTPLUG_ENABLE |
195 ADPA_CRT_HOTPLUG_FORCE_TRIGGER); 167 ADPA_CRT_HOTPLUG_FORCE_TRIGGER);
196 168
197 DRM_DEBUG("pch crt adpa 0x%x", adpa); 169 DRM_DEBUG_KMS("pch crt adpa 0x%x", adpa);
198 I915_WRITE(PCH_ADPA, adpa); 170 I915_WRITE(PCH_ADPA, adpa);
199 171
200 while ((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) != 0) 172 while ((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) != 0)
@@ -227,8 +199,8 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
227 u32 hotplug_en; 199 u32 hotplug_en;
228 int i, tries = 0; 200 int i, tries = 0;
229 201
230 if (IS_IGDNG(dev)) 202 if (IS_IRONLAKE(dev))
231 return intel_igdng_crt_detect_hotplug(connector); 203 return intel_ironlake_crt_detect_hotplug(connector);
232 204
233 /* 205 /*
234 * On 4 series desktop, CRT detect sequence need to be done twice 206 * On 4 series desktop, CRT detect sequence need to be done twice
@@ -549,12 +521,12 @@ void intel_crt_init(struct drm_device *dev)
549 &intel_output->enc); 521 &intel_output->enc);
550 522
551 /* Set up the DDC bus. */ 523 /* Set up the DDC bus. */
552 if (IS_IGDNG(dev)) 524 if (IS_IRONLAKE(dev))
553 i2c_reg = PCH_GPIOA; 525 i2c_reg = PCH_GPIOA;
554 else { 526 else {
555 i2c_reg = GPIOA; 527 i2c_reg = GPIOA;
556 /* Use VBT information for CRT DDC if available */ 528 /* Use VBT information for CRT DDC if available */
557 if (dev_priv->crt_ddc_bus != -1) 529 if (dev_priv->crt_ddc_bus != 0)
558 i2c_reg = dev_priv->crt_ddc_bus; 530 i2c_reg = dev_priv->crt_ddc_bus;
559 } 531 }
560 intel_output->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A"); 532 intel_output->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A");
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 099f420de57a..52cd9b006da2 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -32,7 +32,7 @@
32#include "intel_drv.h" 32#include "intel_drv.h"
33#include "i915_drm.h" 33#include "i915_drm.h"
34#include "i915_drv.h" 34#include "i915_drv.h"
35#include "intel_dp.h" 35#include "drm_dp_helper.h"
36 36
37#include "drm_crtc_helper.h" 37#include "drm_crtc_helper.h"
38 38
@@ -102,32 +102,32 @@ struct intel_limit {
102#define I9XX_DOT_MAX 400000 102#define I9XX_DOT_MAX 400000
103#define I9XX_VCO_MIN 1400000 103#define I9XX_VCO_MIN 1400000
104#define I9XX_VCO_MAX 2800000 104#define I9XX_VCO_MAX 2800000
105#define IGD_VCO_MIN 1700000 105#define PINEVIEW_VCO_MIN 1700000
106#define IGD_VCO_MAX 3500000 106#define PINEVIEW_VCO_MAX 3500000
107#define I9XX_N_MIN 1 107#define I9XX_N_MIN 1
108#define I9XX_N_MAX 6 108#define I9XX_N_MAX 6
109/* IGD's Ncounter is a ring counter */ 109/* Pineview's Ncounter is a ring counter */
110#define IGD_N_MIN 3 110#define PINEVIEW_N_MIN 3
111#define IGD_N_MAX 6 111#define PINEVIEW_N_MAX 6
112#define I9XX_M_MIN 70 112#define I9XX_M_MIN 70
113#define I9XX_M_MAX 120 113#define I9XX_M_MAX 120
114#define IGD_M_MIN 2 114#define PINEVIEW_M_MIN 2
115#define IGD_M_MAX 256 115#define PINEVIEW_M_MAX 256
116#define I9XX_M1_MIN 10 116#define I9XX_M1_MIN 10
117#define I9XX_M1_MAX 22 117#define I9XX_M1_MAX 22
118#define I9XX_M2_MIN 5 118#define I9XX_M2_MIN 5
119#define I9XX_M2_MAX 9 119#define I9XX_M2_MAX 9
120/* IGD M1 is reserved, and must be 0 */ 120/* Pineview M1 is reserved, and must be 0 */
121#define IGD_M1_MIN 0 121#define PINEVIEW_M1_MIN 0
122#define IGD_M1_MAX 0 122#define PINEVIEW_M1_MAX 0
123#define IGD_M2_MIN 0 123#define PINEVIEW_M2_MIN 0
124#define IGD_M2_MAX 254 124#define PINEVIEW_M2_MAX 254
125#define I9XX_P_SDVO_DAC_MIN 5 125#define I9XX_P_SDVO_DAC_MIN 5
126#define I9XX_P_SDVO_DAC_MAX 80 126#define I9XX_P_SDVO_DAC_MAX 80
127#define I9XX_P_LVDS_MIN 7 127#define I9XX_P_LVDS_MIN 7
128#define I9XX_P_LVDS_MAX 98 128#define I9XX_P_LVDS_MAX 98
129#define IGD_P_LVDS_MIN 7 129#define PINEVIEW_P_LVDS_MIN 7
130#define IGD_P_LVDS_MAX 112 130#define PINEVIEW_P_LVDS_MAX 112
131#define I9XX_P1_MIN 1 131#define I9XX_P1_MIN 1
132#define I9XX_P1_MAX 8 132#define I9XX_P1_MAX 8
133#define I9XX_P2_SDVO_DAC_SLOW 10 133#define I9XX_P2_SDVO_DAC_SLOW 10
@@ -234,33 +234,33 @@ struct intel_limit {
234#define G4X_P2_DISPLAY_PORT_FAST 10 234#define G4X_P2_DISPLAY_PORT_FAST 10
235#define G4X_P2_DISPLAY_PORT_LIMIT 0 235#define G4X_P2_DISPLAY_PORT_LIMIT 0
236 236
237/* IGDNG */ 237/* Ironlake */
238/* as we calculate clock using (register_value + 2) for 238/* as we calculate clock using (register_value + 2) for
239 N/M1/M2, so here the range value for them is (actual_value-2). 239 N/M1/M2, so here the range value for them is (actual_value-2).
240 */ 240 */
241#define IGDNG_DOT_MIN 25000 241#define IRONLAKE_DOT_MIN 25000
242#define IGDNG_DOT_MAX 350000 242#define IRONLAKE_DOT_MAX 350000
243#define IGDNG_VCO_MIN 1760000 243#define IRONLAKE_VCO_MIN 1760000
244#define IGDNG_VCO_MAX 3510000 244#define IRONLAKE_VCO_MAX 3510000
245#define IGDNG_N_MIN 1 245#define IRONLAKE_N_MIN 1
246#define IGDNG_N_MAX 5 246#define IRONLAKE_N_MAX 5
247#define IGDNG_M_MIN 79 247#define IRONLAKE_M_MIN 79
248#define IGDNG_M_MAX 118 248#define IRONLAKE_M_MAX 118
249#define IGDNG_M1_MIN 12 249#define IRONLAKE_M1_MIN 12
250#define IGDNG_M1_MAX 23 250#define IRONLAKE_M1_MAX 23
251#define IGDNG_M2_MIN 5 251#define IRONLAKE_M2_MIN 5
252#define IGDNG_M2_MAX 9 252#define IRONLAKE_M2_MAX 9
253#define IGDNG_P_SDVO_DAC_MIN 5 253#define IRONLAKE_P_SDVO_DAC_MIN 5
254#define IGDNG_P_SDVO_DAC_MAX 80 254#define IRONLAKE_P_SDVO_DAC_MAX 80
255#define IGDNG_P_LVDS_MIN 28 255#define IRONLAKE_P_LVDS_MIN 28
256#define IGDNG_P_LVDS_MAX 112 256#define IRONLAKE_P_LVDS_MAX 112
257#define IGDNG_P1_MIN 1 257#define IRONLAKE_P1_MIN 1
258#define IGDNG_P1_MAX 8 258#define IRONLAKE_P1_MAX 8
259#define IGDNG_P2_SDVO_DAC_SLOW 10 259#define IRONLAKE_P2_SDVO_DAC_SLOW 10
260#define IGDNG_P2_SDVO_DAC_FAST 5 260#define IRONLAKE_P2_SDVO_DAC_FAST 5
261#define IGDNG_P2_LVDS_SLOW 14 /* single channel */ 261#define IRONLAKE_P2_LVDS_SLOW 14 /* single channel */
262#define IGDNG_P2_LVDS_FAST 7 /* double channel */ 262#define IRONLAKE_P2_LVDS_FAST 7 /* double channel */
263#define IGDNG_P2_DOT_LIMIT 225000 /* 225Mhz */ 263#define IRONLAKE_P2_DOT_LIMIT 225000 /* 225Mhz */
264 264
265static bool 265static bool
266intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, 266intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
@@ -272,15 +272,15 @@ static bool
272intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, 272intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
273 int target, int refclk, intel_clock_t *best_clock); 273 int target, int refclk, intel_clock_t *best_clock);
274static bool 274static bool
275intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, 275intel_ironlake_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
276 int target, int refclk, intel_clock_t *best_clock); 276 int target, int refclk, intel_clock_t *best_clock);
277 277
278static bool 278static bool
279intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc, 279intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
280 int target, int refclk, intel_clock_t *best_clock); 280 int target, int refclk, intel_clock_t *best_clock);
281static bool 281static bool
282intel_find_pll_igdng_dp(const intel_limit_t *, struct drm_crtc *crtc, 282intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
283 int target, int refclk, intel_clock_t *best_clock); 283 int target, int refclk, intel_clock_t *best_clock);
284 284
285static const intel_limit_t intel_limits_i8xx_dvo = { 285static const intel_limit_t intel_limits_i8xx_dvo = {
286 .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX }, 286 .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX },
@@ -453,13 +453,13 @@ static const intel_limit_t intel_limits_g4x_display_port = {
453 .find_pll = intel_find_pll_g4x_dp, 453 .find_pll = intel_find_pll_g4x_dp,
454}; 454};
455 455
456static const intel_limit_t intel_limits_igd_sdvo = { 456static const intel_limit_t intel_limits_pineview_sdvo = {
457 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX}, 457 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX},
458 .vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX }, 458 .vco = { .min = PINEVIEW_VCO_MIN, .max = PINEVIEW_VCO_MAX },
459 .n = { .min = IGD_N_MIN, .max = IGD_N_MAX }, 459 .n = { .min = PINEVIEW_N_MIN, .max = PINEVIEW_N_MAX },
460 .m = { .min = IGD_M_MIN, .max = IGD_M_MAX }, 460 .m = { .min = PINEVIEW_M_MIN, .max = PINEVIEW_M_MAX },
461 .m1 = { .min = IGD_M1_MIN, .max = IGD_M1_MAX }, 461 .m1 = { .min = PINEVIEW_M1_MIN, .max = PINEVIEW_M1_MAX },
462 .m2 = { .min = IGD_M2_MIN, .max = IGD_M2_MAX }, 462 .m2 = { .min = PINEVIEW_M2_MIN, .max = PINEVIEW_M2_MAX },
463 .p = { .min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX }, 463 .p = { .min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX },
464 .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX }, 464 .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
465 .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, 465 .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
@@ -468,59 +468,59 @@ static const intel_limit_t intel_limits_igd_sdvo = {
468 .find_reduced_pll = intel_find_best_reduced_PLL, 468 .find_reduced_pll = intel_find_best_reduced_PLL,
469}; 469};
470 470
471static const intel_limit_t intel_limits_igd_lvds = { 471static const intel_limit_t intel_limits_pineview_lvds = {
472 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, 472 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
473 .vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX }, 473 .vco = { .min = PINEVIEW_VCO_MIN, .max = PINEVIEW_VCO_MAX },
474 .n = { .min = IGD_N_MIN, .max = IGD_N_MAX }, 474 .n = { .min = PINEVIEW_N_MIN, .max = PINEVIEW_N_MAX },
475 .m = { .min = IGD_M_MIN, .max = IGD_M_MAX }, 475 .m = { .min = PINEVIEW_M_MIN, .max = PINEVIEW_M_MAX },
476 .m1 = { .min = IGD_M1_MIN, .max = IGD_M1_MAX }, 476 .m1 = { .min = PINEVIEW_M1_MIN, .max = PINEVIEW_M1_MAX },
477 .m2 = { .min = IGD_M2_MIN, .max = IGD_M2_MAX }, 477 .m2 = { .min = PINEVIEW_M2_MIN, .max = PINEVIEW_M2_MAX },
478 .p = { .min = IGD_P_LVDS_MIN, .max = IGD_P_LVDS_MAX }, 478 .p = { .min = PINEVIEW_P_LVDS_MIN, .max = PINEVIEW_P_LVDS_MAX },
479 .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX }, 479 .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
480 /* IGD only supports single-channel mode. */ 480 /* Pineview only supports single-channel mode. */
481 .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, 481 .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
482 .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW }, 482 .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW },
483 .find_pll = intel_find_best_PLL, 483 .find_pll = intel_find_best_PLL,
484 .find_reduced_pll = intel_find_best_reduced_PLL, 484 .find_reduced_pll = intel_find_best_reduced_PLL,
485}; 485};
486 486
487static const intel_limit_t intel_limits_igdng_sdvo = { 487static const intel_limit_t intel_limits_ironlake_sdvo = {
488 .dot = { .min = IGDNG_DOT_MIN, .max = IGDNG_DOT_MAX }, 488 .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
489 .vco = { .min = IGDNG_VCO_MIN, .max = IGDNG_VCO_MAX }, 489 .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
490 .n = { .min = IGDNG_N_MIN, .max = IGDNG_N_MAX }, 490 .n = { .min = IRONLAKE_N_MIN, .max = IRONLAKE_N_MAX },
491 .m = { .min = IGDNG_M_MIN, .max = IGDNG_M_MAX }, 491 .m = { .min = IRONLAKE_M_MIN, .max = IRONLAKE_M_MAX },
492 .m1 = { .min = IGDNG_M1_MIN, .max = IGDNG_M1_MAX }, 492 .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
493 .m2 = { .min = IGDNG_M2_MIN, .max = IGDNG_M2_MAX }, 493 .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
494 .p = { .min = IGDNG_P_SDVO_DAC_MIN, .max = IGDNG_P_SDVO_DAC_MAX }, 494 .p = { .min = IRONLAKE_P_SDVO_DAC_MIN, .max = IRONLAKE_P_SDVO_DAC_MAX },
495 .p1 = { .min = IGDNG_P1_MIN, .max = IGDNG_P1_MAX }, 495 .p1 = { .min = IRONLAKE_P1_MIN, .max = IRONLAKE_P1_MAX },
496 .p2 = { .dot_limit = IGDNG_P2_DOT_LIMIT, 496 .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
497 .p2_slow = IGDNG_P2_SDVO_DAC_SLOW, 497 .p2_slow = IRONLAKE_P2_SDVO_DAC_SLOW,
498 .p2_fast = IGDNG_P2_SDVO_DAC_FAST }, 498 .p2_fast = IRONLAKE_P2_SDVO_DAC_FAST },
499 .find_pll = intel_igdng_find_best_PLL, 499 .find_pll = intel_ironlake_find_best_PLL,
500}; 500};
501 501
502static const intel_limit_t intel_limits_igdng_lvds = { 502static const intel_limit_t intel_limits_ironlake_lvds = {
503 .dot = { .min = IGDNG_DOT_MIN, .max = IGDNG_DOT_MAX }, 503 .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
504 .vco = { .min = IGDNG_VCO_MIN, .max = IGDNG_VCO_MAX }, 504 .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
505 .n = { .min = IGDNG_N_MIN, .max = IGDNG_N_MAX }, 505 .n = { .min = IRONLAKE_N_MIN, .max = IRONLAKE_N_MAX },
506 .m = { .min = IGDNG_M_MIN, .max = IGDNG_M_MAX }, 506 .m = { .min = IRONLAKE_M_MIN, .max = IRONLAKE_M_MAX },
507 .m1 = { .min = IGDNG_M1_MIN, .max = IGDNG_M1_MAX }, 507 .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
508 .m2 = { .min = IGDNG_M2_MIN, .max = IGDNG_M2_MAX }, 508 .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
509 .p = { .min = IGDNG_P_LVDS_MIN, .max = IGDNG_P_LVDS_MAX }, 509 .p = { .min = IRONLAKE_P_LVDS_MIN, .max = IRONLAKE_P_LVDS_MAX },
510 .p1 = { .min = IGDNG_P1_MIN, .max = IGDNG_P1_MAX }, 510 .p1 = { .min = IRONLAKE_P1_MIN, .max = IRONLAKE_P1_MAX },
511 .p2 = { .dot_limit = IGDNG_P2_DOT_LIMIT, 511 .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
512 .p2_slow = IGDNG_P2_LVDS_SLOW, 512 .p2_slow = IRONLAKE_P2_LVDS_SLOW,
513 .p2_fast = IGDNG_P2_LVDS_FAST }, 513 .p2_fast = IRONLAKE_P2_LVDS_FAST },
514 .find_pll = intel_igdng_find_best_PLL, 514 .find_pll = intel_ironlake_find_best_PLL,
515}; 515};
516 516
517static const intel_limit_t *intel_igdng_limit(struct drm_crtc *crtc) 517static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc)
518{ 518{
519 const intel_limit_t *limit; 519 const intel_limit_t *limit;
520 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 520 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
521 limit = &intel_limits_igdng_lvds; 521 limit = &intel_limits_ironlake_lvds;
522 else 522 else
523 limit = &intel_limits_igdng_sdvo; 523 limit = &intel_limits_ironlake_sdvo;
524 524
525 return limit; 525 return limit;
526} 526}
@@ -557,20 +557,20 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
557 struct drm_device *dev = crtc->dev; 557 struct drm_device *dev = crtc->dev;
558 const intel_limit_t *limit; 558 const intel_limit_t *limit;
559 559
560 if (IS_IGDNG(dev)) 560 if (IS_IRONLAKE(dev))
561 limit = intel_igdng_limit(crtc); 561 limit = intel_ironlake_limit(crtc);
562 else if (IS_G4X(dev)) { 562 else if (IS_G4X(dev)) {
563 limit = intel_g4x_limit(crtc); 563 limit = intel_g4x_limit(crtc);
564 } else if (IS_I9XX(dev) && !IS_IGD(dev)) { 564 } else if (IS_I9XX(dev) && !IS_PINEVIEW(dev)) {
565 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 565 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
566 limit = &intel_limits_i9xx_lvds; 566 limit = &intel_limits_i9xx_lvds;
567 else 567 else
568 limit = &intel_limits_i9xx_sdvo; 568 limit = &intel_limits_i9xx_sdvo;
569 } else if (IS_IGD(dev)) { 569 } else if (IS_PINEVIEW(dev)) {
570 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 570 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
571 limit = &intel_limits_igd_lvds; 571 limit = &intel_limits_pineview_lvds;
572 else 572 else
573 limit = &intel_limits_igd_sdvo; 573 limit = &intel_limits_pineview_sdvo;
574 } else { 574 } else {
575 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 575 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
576 limit = &intel_limits_i8xx_lvds; 576 limit = &intel_limits_i8xx_lvds;
@@ -580,8 +580,8 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
580 return limit; 580 return limit;
581} 581}
582 582
583/* m1 is reserved as 0 in IGD, n is a ring counter */ 583/* m1 is reserved as 0 in Pineview, n is a ring counter */
584static void igd_clock(int refclk, intel_clock_t *clock) 584static void pineview_clock(int refclk, intel_clock_t *clock)
585{ 585{
586 clock->m = clock->m2 + 2; 586 clock->m = clock->m2 + 2;
587 clock->p = clock->p1 * clock->p2; 587 clock->p = clock->p1 * clock->p2;
@@ -591,8 +591,8 @@ static void igd_clock(int refclk, intel_clock_t *clock)
591 591
592static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock) 592static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
593{ 593{
594 if (IS_IGD(dev)) { 594 if (IS_PINEVIEW(dev)) {
595 igd_clock(refclk, clock); 595 pineview_clock(refclk, clock);
596 return; 596 return;
597 } 597 }
598 clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2); 598 clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
@@ -657,7 +657,7 @@ static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock)
657 INTELPllInvalid ("m2 out of range\n"); 657 INTELPllInvalid ("m2 out of range\n");
658 if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1) 658 if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
659 INTELPllInvalid ("m1 out of range\n"); 659 INTELPllInvalid ("m1 out of range\n");
660 if (clock->m1 <= clock->m2 && !IS_IGD(dev)) 660 if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
661 INTELPllInvalid ("m1 <= m2\n"); 661 INTELPllInvalid ("m1 <= m2\n");
662 if (clock->m < limit->m.min || limit->m.max < clock->m) 662 if (clock->m < limit->m.min || limit->m.max < clock->m)
663 INTELPllInvalid ("m out of range\n"); 663 INTELPllInvalid ("m out of range\n");
@@ -706,16 +706,17 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
706 706
707 memset (best_clock, 0, sizeof (*best_clock)); 707 memset (best_clock, 0, sizeof (*best_clock));
708 708
709 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) { 709 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
710 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; 710 clock.m1++) {
711 clock.m1++) { 711 for (clock.m2 = limit->m2.min;
712 for (clock.m2 = limit->m2.min; 712 clock.m2 <= limit->m2.max; clock.m2++) {
713 clock.m2 <= limit->m2.max; clock.m2++) { 713 /* m1 is always 0 in Pineview */
714 /* m1 is always 0 in IGD */ 714 if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
715 if (clock.m2 >= clock.m1 && !IS_IGD(dev)) 715 break;
716 break; 716 for (clock.n = limit->n.min;
717 for (clock.n = limit->n.min; 717 clock.n <= limit->n.max; clock.n++) {
718 clock.n <= limit->n.max; clock.n++) { 718 for (clock.p1 = limit->p1.min;
719 clock.p1 <= limit->p1.max; clock.p1++) {
719 int this_err; 720 int this_err;
720 721
721 intel_clock(dev, refclk, &clock); 722 intel_clock(dev, refclk, &clock);
@@ -751,8 +752,8 @@ intel_find_best_reduced_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
751 752
752 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) { 753 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
753 for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max; clock.m2++) { 754 for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max; clock.m2++) {
754 /* m1 is always 0 in IGD */ 755 /* m1 is always 0 in Pineview */
755 if (clock.m2 >= clock.m1 && !IS_IGD(dev)) 756 if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
756 break; 757 break;
757 for (clock.n = limit->n.min; clock.n <= limit->n.max; 758 for (clock.n = limit->n.min; clock.n <= limit->n.max;
758 clock.n++) { 759 clock.n++) {
@@ -833,8 +834,8 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
833} 834}
834 835
835static bool 836static bool
836intel_find_pll_igdng_dp(const intel_limit_t *limit, struct drm_crtc *crtc, 837intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
837 int target, int refclk, intel_clock_t *best_clock) 838 int target, int refclk, intel_clock_t *best_clock)
838{ 839{
839 struct drm_device *dev = crtc->dev; 840 struct drm_device *dev = crtc->dev;
840 intel_clock_t clock; 841 intel_clock_t clock;
@@ -857,8 +858,8 @@ intel_find_pll_igdng_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
857} 858}
858 859
859static bool 860static bool
860intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, 861intel_ironlake_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
861 int target, int refclk, intel_clock_t *best_clock) 862 int target, int refclk, intel_clock_t *best_clock)
862{ 863{
863 struct drm_device *dev = crtc->dev; 864 struct drm_device *dev = crtc->dev;
864 struct drm_i915_private *dev_priv = dev->dev_private; 865 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -871,7 +872,7 @@ intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
871 return true; 872 return true;
872 873
873 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) 874 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
874 return intel_find_pll_igdng_dp(limit, crtc, target, 875 return intel_find_pll_ironlake_dp(limit, crtc, target,
875 refclk, best_clock); 876 refclk, best_clock);
876 877
877 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 878 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
@@ -949,7 +950,7 @@ void
949intel_wait_for_vblank(struct drm_device *dev) 950intel_wait_for_vblank(struct drm_device *dev)
950{ 951{
951 /* Wait for 20ms, i.e. one cycle at 50hz. */ 952 /* Wait for 20ms, i.e. one cycle at 50hz. */
952 mdelay(20); 953 msleep(20);
953} 954}
954 955
955/* Parameters have changed, update FBC info */ 956/* Parameters have changed, update FBC info */
@@ -994,7 +995,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
994 fbc_ctl |= dev_priv->cfb_fence; 995 fbc_ctl |= dev_priv->cfb_fence;
995 I915_WRITE(FBC_CONTROL, fbc_ctl); 996 I915_WRITE(FBC_CONTROL, fbc_ctl);
996 997
997 DRM_DEBUG("enabled FBC, pitch %ld, yoff %d, plane %d, ", 998 DRM_DEBUG_KMS("enabled FBC, pitch %ld, yoff %d, plane %d, ",
998 dev_priv->cfb_pitch, crtc->y, dev_priv->cfb_plane); 999 dev_priv->cfb_pitch, crtc->y, dev_priv->cfb_plane);
999} 1000}
1000 1001
@@ -1017,7 +1018,7 @@ void i8xx_disable_fbc(struct drm_device *dev)
1017 1018
1018 intel_wait_for_vblank(dev); 1019 intel_wait_for_vblank(dev);
1019 1020
1020 DRM_DEBUG("disabled FBC\n"); 1021 DRM_DEBUG_KMS("disabled FBC\n");
1021} 1022}
1022 1023
1023static bool i8xx_fbc_enabled(struct drm_crtc *crtc) 1024static bool i8xx_fbc_enabled(struct drm_crtc *crtc)
@@ -1062,7 +1063,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1062 /* enable it... */ 1063 /* enable it... */
1063 I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN); 1064 I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);
1064 1065
1065 DRM_DEBUG("enabled fbc on plane %d\n", intel_crtc->plane); 1066 DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
1066} 1067}
1067 1068
1068void g4x_disable_fbc(struct drm_device *dev) 1069void g4x_disable_fbc(struct drm_device *dev)
@@ -1076,7 +1077,7 @@ void g4x_disable_fbc(struct drm_device *dev)
1076 I915_WRITE(DPFC_CONTROL, dpfc_ctl); 1077 I915_WRITE(DPFC_CONTROL, dpfc_ctl);
1077 intel_wait_for_vblank(dev); 1078 intel_wait_for_vblank(dev);
1078 1079
1079 DRM_DEBUG("disabled FBC\n"); 1080 DRM_DEBUG_KMS("disabled FBC\n");
1080} 1081}
1081 1082
1082static bool g4x_fbc_enabled(struct drm_crtc *crtc) 1083static bool g4x_fbc_enabled(struct drm_crtc *crtc)
@@ -1141,25 +1142,27 @@ static void intel_update_fbc(struct drm_crtc *crtc,
1141 * - going to an unsupported config (interlace, pixel multiply, etc.) 1142 * - going to an unsupported config (interlace, pixel multiply, etc.)
1142 */ 1143 */
1143 if (intel_fb->obj->size > dev_priv->cfb_size) { 1144 if (intel_fb->obj->size > dev_priv->cfb_size) {
1144 DRM_DEBUG("framebuffer too large, disabling compression\n"); 1145 DRM_DEBUG_KMS("framebuffer too large, disabling "
1146 "compression\n");
1145 goto out_disable; 1147 goto out_disable;
1146 } 1148 }
1147 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) || 1149 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
1148 (mode->flags & DRM_MODE_FLAG_DBLSCAN)) { 1150 (mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
1149 DRM_DEBUG("mode incompatible with compression, disabling\n"); 1151 DRM_DEBUG_KMS("mode incompatible with compression, "
1152 "disabling\n");
1150 goto out_disable; 1153 goto out_disable;
1151 } 1154 }
1152 if ((mode->hdisplay > 2048) || 1155 if ((mode->hdisplay > 2048) ||
1153 (mode->vdisplay > 1536)) { 1156 (mode->vdisplay > 1536)) {
1154 DRM_DEBUG("mode too large for compression, disabling\n"); 1157 DRM_DEBUG_KMS("mode too large for compression, disabling\n");
1155 goto out_disable; 1158 goto out_disable;
1156 } 1159 }
1157 if ((IS_I915GM(dev) || IS_I945GM(dev)) && plane != 0) { 1160 if ((IS_I915GM(dev) || IS_I945GM(dev)) && plane != 0) {
1158 DRM_DEBUG("plane not 0, disabling compression\n"); 1161 DRM_DEBUG_KMS("plane not 0, disabling compression\n");
1159 goto out_disable; 1162 goto out_disable;
1160 } 1163 }
1161 if (obj_priv->tiling_mode != I915_TILING_X) { 1164 if (obj_priv->tiling_mode != I915_TILING_X) {
1162 DRM_DEBUG("framebuffer not tiled, disabling compression\n"); 1165 DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n");
1163 goto out_disable; 1166 goto out_disable;
1164 } 1167 }
1165 1168
@@ -1181,13 +1184,57 @@ static void intel_update_fbc(struct drm_crtc *crtc,
1181 return; 1184 return;
1182 1185
1183out_disable: 1186out_disable:
1184 DRM_DEBUG("unsupported config, disabling FBC\n"); 1187 DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
1185 /* Multiple disables should be harmless */ 1188 /* Multiple disables should be harmless */
1186 if (dev_priv->display.fbc_enabled(crtc)) 1189 if (dev_priv->display.fbc_enabled(crtc))
1187 dev_priv->display.disable_fbc(dev); 1190 dev_priv->display.disable_fbc(dev);
1188} 1191}
1189 1192
1190static int 1193static int
1194intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
1195{
1196 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1197 u32 alignment;
1198 int ret;
1199
1200 switch (obj_priv->tiling_mode) {
1201 case I915_TILING_NONE:
1202 alignment = 64 * 1024;
1203 break;
1204 case I915_TILING_X:
1205 /* pin() will align the object as required by fence */
1206 alignment = 0;
1207 break;
1208 case I915_TILING_Y:
1209 /* FIXME: Is this true? */
1210 DRM_ERROR("Y tiled not allowed for scan out buffers\n");
1211 return -EINVAL;
1212 default:
1213 BUG();
1214 }
1215
1216 ret = i915_gem_object_pin(obj, alignment);
1217 if (ret != 0)
1218 return ret;
1219
1220 /* Install a fence for tiled scan-out. Pre-i965 always needs a
1221 * fence, whereas 965+ only requires a fence if using
1222 * framebuffer compression. For simplicity, we always install
1223 * a fence as the cost is not that onerous.
1224 */
1225 if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
1226 obj_priv->tiling_mode != I915_TILING_NONE) {
1227 ret = i915_gem_object_get_fence_reg(obj);
1228 if (ret != 0) {
1229 i915_gem_object_unpin(obj);
1230 return ret;
1231 }
1232 }
1233
1234 return 0;
1235}
1236
1237static int
1191intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, 1238intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1192 struct drm_framebuffer *old_fb) 1239 struct drm_framebuffer *old_fb)
1193{ 1240{
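
intel_pin_and_fence_fb_obj factors the pin-plus-fence sequence out of intel_pipe_set_base so that a fence failure unwinds the pin before returning, and future callers can reuse the same sequence. A sketch of that acquire-both-or-neither idiom with stub helpers; none of these names are the GEM API:

#include <errno.h>
#include <stdio.h>

struct obj { int pinned; int fenced; };

static int pin(struct obj *o)    { o->pinned = 1; return 0; }
static void unpin(struct obj *o) { o->pinned = 0; }

static int get_fence(struct obj *o)
{
        (void)o;
        return -ENOSPC;          /* simulate "no fence register free" */
}

/* Acquire both resources or neither: a fence failure releases the pin
 * so the caller always sees a clean object on error. */
static int pin_and_fence(struct obj *o)
{
        int ret = pin(o);

        if (ret)
                return ret;
        if (!o->fenced) {
                ret = get_fence(o);
                if (ret) {
                        unpin(o);
                        return ret;
                }
                o->fenced = 1;
        }
        return 0;
}

int main(void)
{
        struct obj o = { 0, 0 };
        int ret = pin_and_fence(&o);

        printf("ret=%d pinned=%d\n", ret, o.pinned);  /* ret=-28 pinned=0 */
        return 0;
}
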
@@ -1206,12 +1253,12 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1206 int dspstride = (plane == 0) ? DSPASTRIDE : DSPBSTRIDE; 1253 int dspstride = (plane == 0) ? DSPASTRIDE : DSPBSTRIDE;
1207 int dsptileoff = (plane == 0 ? DSPATILEOFF : DSPBTILEOFF); 1254 int dsptileoff = (plane == 0 ? DSPATILEOFF : DSPBTILEOFF);
1208 int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR; 1255 int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
1209 u32 dspcntr, alignment; 1256 u32 dspcntr;
1210 int ret; 1257 int ret;
1211 1258
1212 /* no fb bound */ 1259 /* no fb bound */
1213 if (!crtc->fb) { 1260 if (!crtc->fb) {
1214 DRM_DEBUG("No FB bound\n"); 1261 DRM_DEBUG_KMS("No FB bound\n");
1215 return 0; 1262 return 0;
1216 } 1263 }
1217 1264
@@ -1228,24 +1275,8 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1228 obj = intel_fb->obj; 1275 obj = intel_fb->obj;
1229 obj_priv = obj->driver_private; 1276 obj_priv = obj->driver_private;
1230 1277
1231 switch (obj_priv->tiling_mode) {
1232 case I915_TILING_NONE:
1233 alignment = 64 * 1024;
1234 break;
1235 case I915_TILING_X:
1236 /* pin() will align the object as required by fence */
1237 alignment = 0;
1238 break;
1239 case I915_TILING_Y:
1240 /* FIXME: Is this true? */
1241 DRM_ERROR("Y tiled not allowed for scan out buffers\n");
1242 return -EINVAL;
1243 default:
1244 BUG();
1245 }
1246
1247 mutex_lock(&dev->struct_mutex); 1278 mutex_lock(&dev->struct_mutex);
1248 ret = i915_gem_object_pin(obj, alignment); 1279 ret = intel_pin_and_fence_fb_obj(dev, obj);
1249 if (ret != 0) { 1280 if (ret != 0) {
1250 mutex_unlock(&dev->struct_mutex); 1281 mutex_unlock(&dev->struct_mutex);
1251 return ret; 1282 return ret;
@@ -1258,20 +1289,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1258 return ret; 1289 return ret;
1259 } 1290 }
1260 1291
1261 /* Install a fence for tiled scan-out. Pre-i965 always needs a fence,
1262 * whereas 965+ only requires a fence if using framebuffer compression.
1263 * For simplicity, we always install a fence as the cost is not that onerous.
1264 */
1265 if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
1266 obj_priv->tiling_mode != I915_TILING_NONE) {
1267 ret = i915_gem_object_get_fence_reg(obj);
1268 if (ret != 0) {
1269 i915_gem_object_unpin(obj);
1270 mutex_unlock(&dev->struct_mutex);
1271 return ret;
1272 }
1273 }
1274
1275 dspcntr = I915_READ(dspcntr_reg); 1292 dspcntr = I915_READ(dspcntr_reg);
1276 /* Mask out pixel format bits in case we change it */ 1293 /* Mask out pixel format bits in case we change it */
1277 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; 1294 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
@@ -1287,7 +1304,10 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1287 break; 1304 break;
1288 case 24: 1305 case 24:
1289 case 32: 1306 case 32:
1290 dspcntr |= DISPPLANE_32BPP_NO_ALPHA; 1307 if (crtc->fb->depth == 30)
1308 dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
1309 else
1310 dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
1291 break; 1311 break;
1292 default: 1312 default:
1293 DRM_ERROR("Unknown color depth\n"); 1313 DRM_ERROR("Unknown color depth\n");
@@ -1302,7 +1322,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1302 dspcntr &= ~DISPPLANE_TILED; 1322 dspcntr &= ~DISPPLANE_TILED;
1303 } 1323 }
1304 1324
1305 if (IS_IGDNG(dev)) 1325 if (IS_IRONLAKE(dev))
1306 /* must disable */ 1326 /* must disable */
1307 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; 1327 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
1308 1328
@@ -1311,7 +1331,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1311 Start = obj_priv->gtt_offset; 1331 Start = obj_priv->gtt_offset;
1312 Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8); 1332 Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
1313 1333
1314 DRM_DEBUG("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y); 1334 DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y);
1315 I915_WRITE(dspstride, crtc->fb->pitch); 1335 I915_WRITE(dspstride, crtc->fb->pitch);
1316 if (IS_I965G(dev)) { 1336 if (IS_I965G(dev)) {
1317 I915_WRITE(dspbase, Offset); 1337 I915_WRITE(dspbase, Offset);
@@ -1363,7 +1383,7 @@ static void i915_disable_vga (struct drm_device *dev)
1363 u8 sr1; 1383 u8 sr1;
1364 u32 vga_reg; 1384 u32 vga_reg;
1365 1385
1366 if (IS_IGDNG(dev)) 1386 if (IS_IRONLAKE(dev))
1367 vga_reg = CPU_VGACNTRL; 1387 vga_reg = CPU_VGACNTRL;
1368 else 1388 else
1369 vga_reg = VGACNTRL; 1389 vga_reg = VGACNTRL;
@@ -1379,19 +1399,19 @@ static void i915_disable_vga (struct drm_device *dev)
1379 I915_WRITE(vga_reg, VGA_DISP_DISABLE); 1399 I915_WRITE(vga_reg, VGA_DISP_DISABLE);
1380} 1400}
1381 1401
1382static void igdng_disable_pll_edp (struct drm_crtc *crtc) 1402static void ironlake_disable_pll_edp (struct drm_crtc *crtc)
1383{ 1403{
1384 struct drm_device *dev = crtc->dev; 1404 struct drm_device *dev = crtc->dev;
1385 struct drm_i915_private *dev_priv = dev->dev_private; 1405 struct drm_i915_private *dev_priv = dev->dev_private;
1386 u32 dpa_ctl; 1406 u32 dpa_ctl;
1387 1407
1388 DRM_DEBUG("\n"); 1408 DRM_DEBUG_KMS("\n");
1389 dpa_ctl = I915_READ(DP_A); 1409 dpa_ctl = I915_READ(DP_A);
1390 dpa_ctl &= ~DP_PLL_ENABLE; 1410 dpa_ctl &= ~DP_PLL_ENABLE;
1391 I915_WRITE(DP_A, dpa_ctl); 1411 I915_WRITE(DP_A, dpa_ctl);
1392} 1412}
1393 1413
1394static void igdng_enable_pll_edp (struct drm_crtc *crtc) 1414static void ironlake_enable_pll_edp (struct drm_crtc *crtc)
1395{ 1415{
1396 struct drm_device *dev = crtc->dev; 1416 struct drm_device *dev = crtc->dev;
1397 struct drm_i915_private *dev_priv = dev->dev_private; 1417 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1404,13 +1424,13 @@ static void igdng_enable_pll_edp (struct drm_crtc *crtc)
1404} 1424}
1405 1425
1406 1426
1407static void igdng_set_pll_edp (struct drm_crtc *crtc, int clock) 1427static void ironlake_set_pll_edp (struct drm_crtc *crtc, int clock)
1408{ 1428{
1409 struct drm_device *dev = crtc->dev; 1429 struct drm_device *dev = crtc->dev;
1410 struct drm_i915_private *dev_priv = dev->dev_private; 1430 struct drm_i915_private *dev_priv = dev->dev_private;
1411 u32 dpa_ctl; 1431 u32 dpa_ctl;
1412 1432
1413 DRM_DEBUG("eDP PLL enable for clock %d\n", clock); 1433 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
1414 dpa_ctl = I915_READ(DP_A); 1434 dpa_ctl = I915_READ(DP_A);
1415 dpa_ctl &= ~DP_PLL_FREQ_MASK; 1435 dpa_ctl &= ~DP_PLL_FREQ_MASK;
1416 1436
@@ -1440,7 +1460,7 @@ static void igdng_set_pll_edp (struct drm_crtc *crtc, int clock)
1440 udelay(500); 1460 udelay(500);
1441} 1461}
1442 1462
1443static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) 1463static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
1444{ 1464{
1445 struct drm_device *dev = crtc->dev; 1465 struct drm_device *dev = crtc->dev;
1446 struct drm_i915_private *dev_priv = dev->dev_private; 1466 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1481,10 +1501,19 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
1481 case DRM_MODE_DPMS_ON: 1501 case DRM_MODE_DPMS_ON:
1482 case DRM_MODE_DPMS_STANDBY: 1502 case DRM_MODE_DPMS_STANDBY:
1483 case DRM_MODE_DPMS_SUSPEND: 1503 case DRM_MODE_DPMS_SUSPEND:
1484 DRM_DEBUG("crtc %d dpms on\n", pipe); 1504 DRM_DEBUG_KMS("crtc %d dpms on\n", pipe);
1505
1506 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
1507 temp = I915_READ(PCH_LVDS);
1508 if ((temp & LVDS_PORT_EN) == 0) {
1509 I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
1510 POSTING_READ(PCH_LVDS);
1511 }
1512 }
1513
1485 if (HAS_eDP) { 1514 if (HAS_eDP) {
1486 /* enable eDP PLL */ 1515 /* enable eDP PLL */
1487 igdng_enable_pll_edp(crtc); 1516 ironlake_enable_pll_edp(crtc);
1488 } else { 1517 } else {
1489 /* enable PCH DPLL */ 1518 /* enable PCH DPLL */
1490 temp = I915_READ(pch_dpll_reg); 1519 temp = I915_READ(pch_dpll_reg);
@@ -1501,7 +1530,7 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
1501 I915_READ(fdi_rx_reg); 1530 I915_READ(fdi_rx_reg);
1502 udelay(200); 1531 udelay(200);
1503 1532
1504 /* Enable CPU FDI TX PLL, always on for IGDNG */ 1533 /* Enable CPU FDI TX PLL, always on for Ironlake */
1505 temp = I915_READ(fdi_tx_reg); 1534 temp = I915_READ(fdi_tx_reg);
1506 if ((temp & FDI_TX_PLL_ENABLE) == 0) { 1535 if ((temp & FDI_TX_PLL_ENABLE) == 0) {
1507 I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE); 1536 I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE);
@@ -1568,12 +1597,13 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
1568 udelay(150); 1597 udelay(150);
1569 1598
1570 temp = I915_READ(fdi_rx_iir_reg); 1599 temp = I915_READ(fdi_rx_iir_reg);
1571 DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp); 1600 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
1572 1601
1573 if ((temp & FDI_RX_BIT_LOCK) == 0) { 1602 if ((temp & FDI_RX_BIT_LOCK) == 0) {
1574 for (j = 0; j < tries; j++) { 1603 for (j = 0; j < tries; j++) {
1575 temp = I915_READ(fdi_rx_iir_reg); 1604 temp = I915_READ(fdi_rx_iir_reg);
1576 DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp); 1605 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n",
1606 temp);
1577 if (temp & FDI_RX_BIT_LOCK) 1607 if (temp & FDI_RX_BIT_LOCK)
1578 break; 1608 break;
1579 udelay(200); 1609 udelay(200);
@@ -1582,11 +1612,11 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
1582 I915_WRITE(fdi_rx_iir_reg, 1612 I915_WRITE(fdi_rx_iir_reg,
1583 temp | FDI_RX_BIT_LOCK); 1613 temp | FDI_RX_BIT_LOCK);
1584 else 1614 else
1585 DRM_DEBUG("train 1 fail\n"); 1615 DRM_DEBUG_KMS("train 1 fail\n");
1586 } else { 1616 } else {
1587 I915_WRITE(fdi_rx_iir_reg, 1617 I915_WRITE(fdi_rx_iir_reg,
1588 temp | FDI_RX_BIT_LOCK); 1618 temp | FDI_RX_BIT_LOCK);
1589 DRM_DEBUG("train 1 ok 2!\n"); 1619 DRM_DEBUG_KMS("train 1 ok 2!\n");
1590 } 1620 }
1591 temp = I915_READ(fdi_tx_reg); 1621 temp = I915_READ(fdi_tx_reg);
1592 temp &= ~FDI_LINK_TRAIN_NONE; 1622 temp &= ~FDI_LINK_TRAIN_NONE;
@@ -1601,12 +1631,13 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
1601 udelay(150); 1631 udelay(150);
1602 1632
1603 temp = I915_READ(fdi_rx_iir_reg); 1633 temp = I915_READ(fdi_rx_iir_reg);
1604 DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp); 1634 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
1605 1635
1606 if ((temp & FDI_RX_SYMBOL_LOCK) == 0) { 1636 if ((temp & FDI_RX_SYMBOL_LOCK) == 0) {
1607 for (j = 0; j < tries; j++) { 1637 for (j = 0; j < tries; j++) {
1608 temp = I915_READ(fdi_rx_iir_reg); 1638 temp = I915_READ(fdi_rx_iir_reg);
1609 DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp); 1639 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n",
1640 temp);
1610 if (temp & FDI_RX_SYMBOL_LOCK) 1641 if (temp & FDI_RX_SYMBOL_LOCK)
1611 break; 1642 break;
1612 udelay(200); 1643 udelay(200);
@@ -1614,15 +1645,15 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
1614 if (j != tries) { 1645 if (j != tries) {
1615 I915_WRITE(fdi_rx_iir_reg, 1646 I915_WRITE(fdi_rx_iir_reg,
1616 temp | FDI_RX_SYMBOL_LOCK); 1647 temp | FDI_RX_SYMBOL_LOCK);
1617 DRM_DEBUG("train 2 ok 1!\n"); 1648 DRM_DEBUG_KMS("train 2 ok 1!\n");
1618 } else 1649 } else
1619 DRM_DEBUG("train 2 fail\n"); 1650 DRM_DEBUG_KMS("train 2 fail\n");
1620 } else { 1651 } else {
1621 I915_WRITE(fdi_rx_iir_reg, 1652 I915_WRITE(fdi_rx_iir_reg,
1622 temp | FDI_RX_SYMBOL_LOCK); 1653 temp | FDI_RX_SYMBOL_LOCK);
1623 DRM_DEBUG("train 2 ok 2!\n"); 1654 DRM_DEBUG_KMS("train 2 ok 2!\n");
1624 } 1655 }
1625 DRM_DEBUG("train done\n"); 1656 DRM_DEBUG_KMS("train done\n");
1626 1657
1627 /* set transcoder timing */ 1658 /* set transcoder timing */
1628 I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg)); 1659 I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg));
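Both training stages above share one poll-and-ack shape; factored out as a sketch (lock_bit stands in for FDI_RX_BIT_LOCK or FDI_RX_SYMBOL_LOCK and is not a real variable in this file):

	/* sketch: one FDI link training stage */
	temp = I915_READ(fdi_rx_iir_reg);
	for (j = 0; j < tries && (temp & lock_bit) == 0; j++) {
		udelay(200);
		temp = I915_READ(fdi_rx_iir_reg);
	}
	if (temp & lock_bit)
		I915_WRITE(fdi_rx_iir_reg, temp | lock_bit);	/* ack IIR */
	else
		DRM_DEBUG_KMS("train fail\n");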
@@ -1664,9 +1695,7 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
1664 1695
1665 break; 1696 break;
1666 case DRM_MODE_DPMS_OFF: 1697 case DRM_MODE_DPMS_OFF:
1667 DRM_DEBUG("crtc %d dpms off\n", pipe); 1698 DRM_DEBUG_KMS("crtc %d dpms off\n", pipe);
1668
1669 i915_disable_vga(dev);
1670 1699
1671 /* Disable display plane */ 1700 /* Disable display plane */
1672 temp = I915_READ(dspcntr_reg); 1701 temp = I915_READ(dspcntr_reg);
@@ -1677,6 +1706,8 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
1677 I915_READ(dspbase_reg); 1706 I915_READ(dspbase_reg);
1678 } 1707 }
1679 1708
1709 i915_disable_vga(dev);
1710
1680 /* disable cpu pipe, disable after all planes disabled */ 1711 /* disable cpu pipe, disable after all planes disabled */
1681 temp = I915_READ(pipeconf_reg); 1712 temp = I915_READ(pipeconf_reg);
1682 if ((temp & PIPEACONF_ENABLE) != 0) { 1713 if ((temp & PIPEACONF_ENABLE) != 0) {
@@ -1690,16 +1721,23 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
1690 udelay(500); 1721 udelay(500);
1691 continue; 1722 continue;
1692 } else { 1723 } else {
1693 DRM_DEBUG("pipe %d off delay\n", pipe); 1724 DRM_DEBUG_KMS("pipe %d off delay\n",
1725 pipe);
1694 break; 1726 break;
1695 } 1727 }
1696 } 1728 }
1697 } else 1729 } else
1698 DRM_DEBUG("crtc %d is disabled\n", pipe); 1730 DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
1699 1731
1700 if (HAS_eDP) { 1732 udelay(100);
1701 igdng_disable_pll_edp(crtc); 1733
1734 /* Disable PF */
1735 temp = I915_READ(pf_ctl_reg);
1736 if ((temp & PF_ENABLE) != 0) {
1737 I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE);
1738 I915_READ(pf_ctl_reg);
1702 } 1739 }
1740 I915_WRITE(pf_win_size, 0);
1703 1741
1704 /* disable CPU FDI tx and PCH FDI rx */ 1742 /* disable CPU FDI tx and PCH FDI rx */
1705 temp = I915_READ(fdi_tx_reg); 1743 temp = I915_READ(fdi_tx_reg);
@@ -1725,6 +1763,13 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
1725 1763
1726 udelay(100); 1764 udelay(100);
1727 1765
1766 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
1767 temp = I915_READ(PCH_LVDS);
1768 I915_WRITE(PCH_LVDS, temp & ~LVDS_PORT_EN);
1769 I915_READ(PCH_LVDS);
1770 udelay(100);
1771 }
1772
1728 /* disable PCH transcoder */ 1773 /* disable PCH transcoder */
1729 temp = I915_READ(transconf_reg); 1774 temp = I915_READ(transconf_reg);
1730 if ((temp & TRANS_ENABLE) != 0) { 1775 if ((temp & TRANS_ENABLE) != 0) {
@@ -1738,12 +1783,15 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
1738 udelay(500); 1783 udelay(500);
1739 continue; 1784 continue;
1740 } else { 1785 } else {
1741 DRM_DEBUG("transcoder %d off delay\n", pipe); 1786 DRM_DEBUG_KMS("transcoder %d off "
1787 "delay\n", pipe);
1742 break; 1788 break;
1743 } 1789 }
1744 } 1790 }
1745 } 1791 }
1746 1792
1793 udelay(100);
1794
1747 /* disable PCH DPLL */ 1795 /* disable PCH DPLL */
1748 temp = I915_READ(pch_dpll_reg); 1796 temp = I915_READ(pch_dpll_reg);
1749 if ((temp & DPLL_VCO_ENABLE) != 0) { 1797 if ((temp & DPLL_VCO_ENABLE) != 0) {
@@ -1751,14 +1799,20 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
1751 I915_READ(pch_dpll_reg); 1799 I915_READ(pch_dpll_reg);
1752 } 1800 }
1753 1801
1754 temp = I915_READ(fdi_rx_reg); 1802 if (HAS_eDP) {
1755 if ((temp & FDI_RX_PLL_ENABLE) != 0) { 1803 ironlake_disable_pll_edp(crtc);
1756 temp &= ~FDI_SEL_PCDCLK;
1757 temp &= ~FDI_RX_PLL_ENABLE;
1758 I915_WRITE(fdi_rx_reg, temp);
1759 I915_READ(fdi_rx_reg);
1760 } 1804 }
1761 1805
1806 temp = I915_READ(fdi_rx_reg);
1807 temp &= ~FDI_SEL_PCDCLK;
1808 I915_WRITE(fdi_rx_reg, temp);
1809 I915_READ(fdi_rx_reg);
1810
1811 temp = I915_READ(fdi_rx_reg);
1812 temp &= ~FDI_RX_PLL_ENABLE;
1813 I915_WRITE(fdi_rx_reg, temp);
1814 I915_READ(fdi_rx_reg);
1815
1762 /* Disable CPU FDI TX PLL */ 1816 /* Disable CPU FDI TX PLL */
1763 temp = I915_READ(fdi_tx_reg); 1817 temp = I915_READ(fdi_tx_reg);
1764 if ((temp & FDI_TX_PLL_ENABLE) != 0) { 1818 if ((temp & FDI_TX_PLL_ENABLE) != 0) {
@@ -1767,20 +1821,43 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
1767 udelay(100); 1821 udelay(100);
1768 } 1822 }
1769 1823
1770 /* Disable PF */
1771 temp = I915_READ(pf_ctl_reg);
1772 if ((temp & PF_ENABLE) != 0) {
1773 I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE);
1774 I915_READ(pf_ctl_reg);
1775 }
1776 I915_WRITE(pf_win_size, 0);
1777
1778 /* Wait for the clocks to turn off. */ 1824 /* Wait for the clocks to turn off. */
1779 udelay(150); 1825 udelay(100);
1780 break; 1826 break;
1781 } 1827 }
1782} 1828}
1783 1829
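Taken together, the DPMS_OFF hunks above rearrange the teardown into a strict front-to-back order; as a summary (sequence only, not literal code):

	/*
	 * 1. display plane off, then VGA plane (moved below the plane)
	 * 2. CPU pipe off, only after all planes
	 * 3. panel fitter off, pf_win_size cleared (moved up from the end)
	 * 4. CPU FDI TX and PCH FDI RX off
	 * 5. PCH LVDS port off (new), then PCH transcoder off
	 * 6. PCH DPLL off, then eDP PLL off (moved after the DPLL)
	 * 7. FDI RX: deselect PCDCLK, then PLL off; CPU FDI TX PLL off
	 */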
1830static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
1831{
1832 struct intel_overlay *overlay;
1833 int ret;
1834
1835 if (!enable && intel_crtc->overlay) {
1836 overlay = intel_crtc->overlay;
1837 mutex_lock(&overlay->dev->struct_mutex);
1838 for (;;) {
1839 ret = intel_overlay_switch_off(overlay);
1840 if (ret == 0)
1841 break;
1842
1843 ret = intel_overlay_recover_from_interrupt(overlay, 0);
1844 if (ret != 0) {
1845 /* overlay doesn't react anymore. Usually
1846 * results in a black screen and an unkillable
1847 * X server. */
1848 BUG();
1849 overlay->hw_wedged = HW_WEDGED;
1850 break;
1851 }
1852 }
1853 mutex_unlock(&overlay->dev->struct_mutex);
1854 }
1855 /* Let userspace switch the overlay on again. In most cases userspace
1856 * has to recompute where to put it anyway. */
1857
1858 return;
1859}
1860
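A subtlety in intel_crtc_dpms_overlay above: the two statements after BUG() are only dead code when CONFIG_BUG is enabled; with CONFIG_BUG=n, BUG() typically expands to nothing and the wedge fallback still runs. Sketch of the generic definition this relies on (asm-generic/bug.h, paraphrased):

	#ifndef CONFIG_BUG
	#define BUG() do {} while (0)	/* no-op: execution continues */
	#endif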
1784static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) 1861static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
1785{ 1862{
1786 struct drm_device *dev = crtc->dev; 1863 struct drm_device *dev = crtc->dev;
@@ -1839,12 +1916,14 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
1839 intel_update_fbc(crtc, &crtc->mode); 1916 intel_update_fbc(crtc, &crtc->mode);
1840 1917
1841 /* Give the overlay scaler a chance to enable if it's on this pipe */ 1918 /* Give the overlay scaler a chance to enable if it's on this pipe */
1842 //intel_crtc_dpms_video(crtc, true); TODO 1919 intel_crtc_dpms_overlay(intel_crtc, true);
1843 break; 1920 break;
1844 case DRM_MODE_DPMS_OFF: 1921 case DRM_MODE_DPMS_OFF:
1845 intel_update_watermarks(dev); 1922 intel_update_watermarks(dev);
1923
1846 /* Give the overlay scaler a chance to disable if it's on this pipe */ 1924 /* Give the overlay scaler a chance to disable if it's on this pipe */
1847 //intel_crtc_dpms_video(crtc, FALSE); TODO 1925 intel_crtc_dpms_overlay(intel_crtc, false);
1926 drm_vblank_off(dev, pipe);
1848 1927
1849 if (dev_priv->cfb_plane == plane && 1928 if (dev_priv->cfb_plane == plane &&
1850 dev_priv->display.disable_fbc) 1929 dev_priv->display.disable_fbc)
@@ -1963,7 +2042,7 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
1963 struct drm_display_mode *adjusted_mode) 2042 struct drm_display_mode *adjusted_mode)
1964{ 2043{
1965 struct drm_device *dev = crtc->dev; 2044 struct drm_device *dev = crtc->dev;
1966 if (IS_IGDNG(dev)) { 2045 if (IS_IRONLAKE(dev)) {
1967 /* FDI link clock is fixed at 2.7G */ 2046 /* FDI link clock is fixed at 2.7G */
1968 if (mode->clock * 3 > 27000 * 4) 2047 if (mode->clock * 3 > 27000 * 4)
1969 return MODE_CLOCK_HIGH; 2048 return MODE_CLOCK_HIGH;
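Two things are worth noting in this hunk: the comparison appears to weigh the mode's data rate (pixel clock times 3 bytes per pixel) against the four FDI lanes, and the function is a bool mode_fixup, so returning the nonzero enum value MODE_CLOCK_HIGH evaluates as true (mode accepted) rather than as a rejection. An unambiguous form of the guard would be (a sketch, not what the commit does):

	if (IS_IRONLAKE(dev)) {
		/* FDI link clock is fixed at 2.7G */
		if (mode->clock * 3 > 27000 * 4)
			return false;	/* bool mode_fixup: false rejects */
	}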
@@ -2039,7 +2118,7 @@ static int i830_get_display_clock_speed(struct drm_device *dev)
2039 * Return the pipe currently connected to the panel fitter, 2118 * Return the pipe currently connected to the panel fitter,
2040 * or -1 if the panel fitter is not present or not in use 2119 * or -1 if the panel fitter is not present or not in use
2041 */ 2120 */
2042static int intel_panel_fitter_pipe (struct drm_device *dev) 2121int intel_panel_fitter_pipe (struct drm_device *dev)
2043{ 2122{
2044 struct drm_i915_private *dev_priv = dev->dev_private; 2123 struct drm_i915_private *dev_priv = dev->dev_private;
2045 u32 pfit_control; 2124 u32 pfit_control;
@@ -2083,9 +2162,8 @@ fdi_reduce_ratio(u32 *num, u32 *den)
2083#define LINK_N 0x80000 2162#define LINK_N 0x80000
2084 2163
2085static void 2164static void
2086igdng_compute_m_n(int bits_per_pixel, int nlanes, 2165ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
2087 int pixel_clock, int link_clock, 2166 int link_clock, struct fdi_m_n *m_n)
2088 struct fdi_m_n *m_n)
2089{ 2167{
2090 u64 temp; 2168 u64 temp;
2091 2169
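Only the signature is reflowed here; the elided body derives the two M/N pairs and reduces them with fdi_reduce_ratio(). A sketch consistent with the LINK_N constant above (and assuming a DATA_N constant defined beside it) -- treat this as a reconstruction, since the exact body sits outside the hunk:

	m_n->tu = 64;	/* default transfer unit size */

	temp = (u64) DATA_N * pixel_clock;
	temp = div_u64(temp, link_clock);
	m_n->gmch_m = div_u64(temp * bits_per_pixel, nlanes) >> 3; /* bits -> bytes */
	m_n->gmch_n = DATA_N;
	fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);

	m_n->link_m = div_u64((u64) LINK_N * pixel_clock, link_clock);
	m_n->link_n = LINK_N;
	fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);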
@@ -2113,34 +2191,34 @@ struct intel_watermark_params {
2113 unsigned long cacheline_size; 2191 unsigned long cacheline_size;
2114}; 2192};
2115 2193
2116/* IGD has different values for various configs */ 2194/* Pineview has different values for various configs */
2117static struct intel_watermark_params igd_display_wm = { 2195static struct intel_watermark_params pineview_display_wm = {
2118 IGD_DISPLAY_FIFO, 2196 PINEVIEW_DISPLAY_FIFO,
2119 IGD_MAX_WM, 2197 PINEVIEW_MAX_WM,
2120 IGD_DFT_WM, 2198 PINEVIEW_DFT_WM,
2121 IGD_GUARD_WM, 2199 PINEVIEW_GUARD_WM,
2122 IGD_FIFO_LINE_SIZE 2200 PINEVIEW_FIFO_LINE_SIZE
2123}; 2201};
2124static struct intel_watermark_params igd_display_hplloff_wm = { 2202static struct intel_watermark_params pineview_display_hplloff_wm = {
2125 IGD_DISPLAY_FIFO, 2203 PINEVIEW_DISPLAY_FIFO,
2126 IGD_MAX_WM, 2204 PINEVIEW_MAX_WM,
2127 IGD_DFT_HPLLOFF_WM, 2205 PINEVIEW_DFT_HPLLOFF_WM,
2128 IGD_GUARD_WM, 2206 PINEVIEW_GUARD_WM,
2129 IGD_FIFO_LINE_SIZE 2207 PINEVIEW_FIFO_LINE_SIZE
2130}; 2208};
2131static struct intel_watermark_params igd_cursor_wm = { 2209static struct intel_watermark_params pineview_cursor_wm = {
2132 IGD_CURSOR_FIFO, 2210 PINEVIEW_CURSOR_FIFO,
2133 IGD_CURSOR_MAX_WM, 2211 PINEVIEW_CURSOR_MAX_WM,
2134 IGD_CURSOR_DFT_WM, 2212 PINEVIEW_CURSOR_DFT_WM,
2135 IGD_CURSOR_GUARD_WM, 2213 PINEVIEW_CURSOR_GUARD_WM,
2136 IGD_FIFO_LINE_SIZE, 2214 PINEVIEW_FIFO_LINE_SIZE,
2137}; 2215};
2138static struct intel_watermark_params igd_cursor_hplloff_wm = { 2216static struct intel_watermark_params pineview_cursor_hplloff_wm = {
2139 IGD_CURSOR_FIFO, 2217 PINEVIEW_CURSOR_FIFO,
2140 IGD_CURSOR_MAX_WM, 2218 PINEVIEW_CURSOR_MAX_WM,
2141 IGD_CURSOR_DFT_WM, 2219 PINEVIEW_CURSOR_DFT_WM,
2142 IGD_CURSOR_GUARD_WM, 2220 PINEVIEW_CURSOR_GUARD_WM,
2143 IGD_FIFO_LINE_SIZE 2221 PINEVIEW_FIFO_LINE_SIZE
2144}; 2222};
2145static struct intel_watermark_params g4x_wm_info = { 2223static struct intel_watermark_params g4x_wm_info = {
2146 G4X_FIFO_SIZE, 2224 G4X_FIFO_SIZE,
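The renamed tables depend on positional initialization matching struct intel_watermark_params; with designated initializers the same data would be self-describing. A sketch, with field names assumed from the struct definition that is only partially visible above:

	static struct intel_watermark_params pineview_display_wm = {
		.fifo_size	= PINEVIEW_DISPLAY_FIFO,
		.max_wm		= PINEVIEW_MAX_WM,
		.default_wm	= PINEVIEW_DFT_WM,
		.guard_size	= PINEVIEW_GUARD_WM,
		.cacheline_size	= PINEVIEW_FIFO_LINE_SIZE,
	};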
@@ -2213,11 +2291,11 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
2213 1000; 2291 1000;
2214 entries_required /= wm->cacheline_size; 2292 entries_required /= wm->cacheline_size;
2215 2293
2216 DRM_DEBUG("FIFO entries required for mode: %d\n", entries_required); 2294 DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries_required);
2217 2295
2218 wm_size = wm->fifo_size - (entries_required + wm->guard_size); 2296 wm_size = wm->fifo_size - (entries_required + wm->guard_size);
2219 2297
2220 DRM_DEBUG("FIFO watermark level: %d\n", wm_size); 2298 DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size);
2221 2299
2222 /* Don't promote wm_size to unsigned... */ 2300 /* Don't promote wm_size to unsigned... */
2223 if (wm_size > (long)wm->max_wm) 2301 if (wm_size > (long)wm->max_wm)
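Worked through with round numbers (a 108,000 kHz dot clock, 4 bytes per pixel, a 64-byte cacheline, and an assumed 2000 ns latency, taking the elided first lines to be the usual ((clock_in_khz / 1000) * pixel_size * latency_ns) / 1000 form):

	/*
	 * entries_required = (108000/1000) * 4 * 2000 / 1000 = 864 bytes
	 *                    864 / 64 = 13 cacheline-sized FIFO entries
	 * wm_size          = fifo_size - (13 + guard_size)
	 */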
@@ -2279,50 +2357,50 @@ static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int fsb,
2279 return latency; 2357 return latency;
2280 } 2358 }
2281 2359
2282 DRM_DEBUG("Unknown FSB/MEM found, disable CxSR\n"); 2360 DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
2283 2361
2284 return NULL; 2362 return NULL;
2285} 2363}
2286 2364
2287static void igd_disable_cxsr(struct drm_device *dev) 2365static void pineview_disable_cxsr(struct drm_device *dev)
2288{ 2366{
2289 struct drm_i915_private *dev_priv = dev->dev_private; 2367 struct drm_i915_private *dev_priv = dev->dev_private;
2290 u32 reg; 2368 u32 reg;
2291 2369
2292 /* deactivate cxsr */ 2370 /* deactivate cxsr */
2293 reg = I915_READ(DSPFW3); 2371 reg = I915_READ(DSPFW3);
2294 reg &= ~(IGD_SELF_REFRESH_EN); 2372 reg &= ~(PINEVIEW_SELF_REFRESH_EN);
2295 I915_WRITE(DSPFW3, reg); 2373 I915_WRITE(DSPFW3, reg);
2296 DRM_INFO("Big FIFO is disabled\n"); 2374 DRM_INFO("Big FIFO is disabled\n");
2297} 2375}
2298 2376
2299static void igd_enable_cxsr(struct drm_device *dev, unsigned long clock, 2377static void pineview_enable_cxsr(struct drm_device *dev, unsigned long clock,
2300 int pixel_size) 2378 int pixel_size)
2301{ 2379{
2302 struct drm_i915_private *dev_priv = dev->dev_private; 2380 struct drm_i915_private *dev_priv = dev->dev_private;
2303 u32 reg; 2381 u32 reg;
2304 unsigned long wm; 2382 unsigned long wm;
2305 struct cxsr_latency *latency; 2383 struct cxsr_latency *latency;
2306 2384
2307 latency = intel_get_cxsr_latency(IS_IGDG(dev), dev_priv->fsb_freq, 2385 latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->fsb_freq,
2308 dev_priv->mem_freq); 2386 dev_priv->mem_freq);
2309 if (!latency) { 2387 if (!latency) {
2310 DRM_DEBUG("Unknown FSB/MEM found, disable CxSR\n"); 2388 DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
2311 igd_disable_cxsr(dev); 2389 pineview_disable_cxsr(dev);
2312 return; 2390 return;
2313 } 2391 }
2314 2392
2315 /* Display SR */ 2393 /* Display SR */
2316 wm = intel_calculate_wm(clock, &igd_display_wm, pixel_size, 2394 wm = intel_calculate_wm(clock, &pineview_display_wm, pixel_size,
2317 latency->display_sr); 2395 latency->display_sr);
2318 reg = I915_READ(DSPFW1); 2396 reg = I915_READ(DSPFW1);
2319 reg &= 0x7fffff; 2397 reg &= 0x7fffff;
2320 reg |= wm << 23; 2398 reg |= wm << 23;
2321 I915_WRITE(DSPFW1, reg); 2399 I915_WRITE(DSPFW1, reg);
2322 DRM_DEBUG("DSPFW1 register is %x\n", reg); 2400 DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
2323 2401
2324 /* cursor SR */ 2402 /* cursor SR */
2325 wm = intel_calculate_wm(clock, &igd_cursor_wm, pixel_size, 2403 wm = intel_calculate_wm(clock, &pineview_cursor_wm, pixel_size,
2326 latency->cursor_sr); 2404 latency->cursor_sr);
2327 reg = I915_READ(DSPFW3); 2405 reg = I915_READ(DSPFW3);
2328 reg &= ~(0x3f << 24); 2406 reg &= ~(0x3f << 24);
@@ -2330,7 +2408,7 @@ static void igd_enable_cxsr(struct drm_device *dev, unsigned long clock,
2330 I915_WRITE(DSPFW3, reg); 2408 I915_WRITE(DSPFW3, reg);
2331 2409
2332 /* Display HPLL off SR */ 2410 /* Display HPLL off SR */
2333 wm = intel_calculate_wm(clock, &igd_display_hplloff_wm, 2411 wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
2334 latency->display_hpll_disable, I915_FIFO_LINE_SIZE); 2412 latency->display_hpll_disable, I915_FIFO_LINE_SIZE);
2335 reg = I915_READ(DSPFW3); 2413 reg = I915_READ(DSPFW3);
2336 reg &= 0xfffffe00; 2414 reg &= 0xfffffe00;
@@ -2338,17 +2416,17 @@ static void igd_enable_cxsr(struct drm_device *dev, unsigned long clock,
2338 I915_WRITE(DSPFW3, reg); 2416 I915_WRITE(DSPFW3, reg);
2339 2417
2340 /* cursor HPLL off SR */ 2418 /* cursor HPLL off SR */
2341 wm = intel_calculate_wm(clock, &igd_cursor_hplloff_wm, pixel_size, 2419 wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm, pixel_size,
2342 latency->cursor_hpll_disable); 2420 latency->cursor_hpll_disable);
2343 reg = I915_READ(DSPFW3); 2421 reg = I915_READ(DSPFW3);
2344 reg &= ~(0x3f << 16); 2422 reg &= ~(0x3f << 16);
2345 reg |= (wm & 0x3f) << 16; 2423 reg |= (wm & 0x3f) << 16;
2346 I915_WRITE(DSPFW3, reg); 2424 I915_WRITE(DSPFW3, reg);
2347 DRM_DEBUG("DSPFW3 register is %x\n", reg); 2425 DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
2348 2426
2349 /* activate cxsr */ 2427 /* activate cxsr */
2350 reg = I915_READ(DSPFW3); 2428 reg = I915_READ(DSPFW3);
2351 reg |= IGD_SELF_REFRESH_EN; 2429 reg |= PINEVIEW_SELF_REFRESH_EN;
2352 I915_WRITE(DSPFW3, reg); 2430 I915_WRITE(DSPFW3, reg);
2353 2431
2354 DRM_INFO("Big FIFO is enabled\n"); 2432 DRM_INFO("Big FIFO is enabled\n");
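Read off the masks and shifts above, the self-refresh watermark packing is (derived from the code itself, not from documentation):

	/*
	 * DSPFW1  bits 31:23  display SR watermark (reg &= 0x7fffff, wm << 23)
	 * DSPFW3  bits 29:24  cursor SR            (~(0x3f << 24))
	 * DSPFW3  bits 21:16  cursor HPLL-off SR   (~(0x3f << 16))
	 * DSPFW3  bits  8:0   display HPLL-off SR  (reg &= 0xfffffe00)
	 */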
@@ -2384,8 +2462,8 @@ static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
2384 size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - 2462 size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) -
2385 (dsparb & 0x7f); 2463 (dsparb & 0x7f);
2386 2464
2387 DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A", 2465 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
2388 size); 2466 plane ? "B" : "A", size);
2389 2467
2390 return size; 2468 return size;
2391} 2469}
@@ -2403,8 +2481,8 @@ static int i85x_get_fifo_size(struct drm_device *dev, int plane)
2403 (dsparb & 0x1ff); 2481 (dsparb & 0x1ff);
2404 size >>= 1; /* Convert to cachelines */ 2482 size >>= 1; /* Convert to cachelines */
2405 2483
2406 DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A", 2484 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
2407 size); 2485 plane ? "B" : "A", size);
2408 2486
2409 return size; 2487 return size;
2410} 2488}
@@ -2418,7 +2496,8 @@ static int i845_get_fifo_size(struct drm_device *dev, int plane)
2418 size = dsparb & 0x7f; 2496 size = dsparb & 0x7f;
2419 size >>= 2; /* Convert to cachelines */ 2497 size >>= 2; /* Convert to cachelines */
2420 2498
2421 DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A", 2499 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
2500 plane ? "B" : "A",
2422 size); 2501 size);
2423 2502
2424 return size; 2503 return size;
@@ -2433,8 +2512,8 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane)
2433 size = dsparb & 0x7f; 2512 size = dsparb & 0x7f;
2434 size >>= 1; /* Convert to cachelines */ 2513 size >>= 1; /* Convert to cachelines */
2435 2514
2436 DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A", 2515 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
2437 size); 2516 plane ? "B" : "A", size);
2438 2517
2439 return size; 2518 return size;
2440} 2519}
@@ -2509,15 +2588,39 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
2509 (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); 2588 (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
2510} 2589}
2511 2590
2512static void i965_update_wm(struct drm_device *dev, int unused, int unused2, 2591static void i965_update_wm(struct drm_device *dev, int planea_clock,
2513 int unused3, int unused4) 2592 int planeb_clock, int sr_hdisplay, int pixel_size)
2514{ 2593{
2515 struct drm_i915_private *dev_priv = dev->dev_private; 2594 struct drm_i915_private *dev_priv = dev->dev_private;
2595 unsigned long line_time_us;
2596 int sr_clock, sr_entries, srwm = 1;
2597
2598 /* Calc sr entries for one plane configs */
2599 if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
2600 /* self-refresh has much higher latency */
2601 static const int sr_latency_ns = 12000;
2602
2603 sr_clock = planea_clock ? planea_clock : planeb_clock;
2604 line_time_us = ((sr_hdisplay * 1000) / sr_clock);
2605
2606 /* Use ns/us then divide to preserve precision */
2607 sr_entries = (((sr_latency_ns / line_time_us) + 1) *
2608 pixel_size * sr_hdisplay) / 1000;
2609 sr_entries = roundup(sr_entries / I915_FIFO_LINE_SIZE, 1);
2610 DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
2611 srwm = I945_FIFO_SIZE - sr_entries;
2612 if (srwm < 0)
2613 srwm = 1;
2614 srwm &= 0x3f;
2615 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
2616 }
2516 2617
2517 DRM_DEBUG("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR 8\n"); 2618 DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
2619 srwm);
2518 2620
2519 /* 965 has limitations... */ 2621 /* 965 has limitations... */
2520 I915_WRITE(DSPFW1, (8 << 16) | (8 << 8) | (8 << 0)); 2622 I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) | (8 << 16) | (8 << 8) |
2623 (8 << 0));
2521 I915_WRITE(DSPFW2, (8 << 8) | (8 << 0)); 2624 I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
2522} 2625}
2523 2626
@@ -2553,7 +2656,7 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
2553 pixel_size, latency_ns); 2656 pixel_size, latency_ns);
2554 planeb_wm = intel_calculate_wm(planeb_clock, &planeb_params, 2657 planeb_wm = intel_calculate_wm(planeb_clock, &planeb_params,
2555 pixel_size, latency_ns); 2658 pixel_size, latency_ns);
2556 DRM_DEBUG("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm); 2659 DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
2557 2660
2558 /* 2661 /*
2559 * Overlay gets an aggressive default since video jitter is bad. 2662 * Overlay gets an aggressive default since video jitter is bad.
@@ -2573,14 +2676,14 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
2573 sr_entries = (((sr_latency_ns / line_time_us) + 1) * 2676 sr_entries = (((sr_latency_ns / line_time_us) + 1) *
2574 pixel_size * sr_hdisplay) / 1000; 2677 pixel_size * sr_hdisplay) / 1000;
2575 sr_entries = roundup(sr_entries / cacheline_size, 1); 2678 sr_entries = roundup(sr_entries / cacheline_size, 1);
2576 DRM_DEBUG("self-refresh entries: %d\n", sr_entries); 2679 DRM_DEBUG_KMS("self-refresh entries: %d\n", sr_entries);
2577 srwm = total_size - sr_entries; 2680 srwm = total_size - sr_entries;
2578 if (srwm < 0) 2681 if (srwm < 0)
2579 srwm = 1; 2682 srwm = 1;
2580 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f)); 2683 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f));
2581 } 2684 }
2582 2685
2583 DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", 2686 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
2584 planea_wm, planeb_wm, cwm, srwm); 2687 planea_wm, planeb_wm, cwm, srwm);
2585 2688
2586 fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f); 2689 fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
@@ -2607,7 +2710,7 @@ static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused,
2607 pixel_size, latency_ns); 2710 pixel_size, latency_ns);
2608 fwater_lo |= (3<<8) | planea_wm; 2711 fwater_lo |= (3<<8) | planea_wm;
2609 2712
2610 DRM_DEBUG("Setting FIFO watermarks - A: %d\n", planea_wm); 2713 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
2611 2714
2612 I915_WRITE(FW_BLC, fwater_lo); 2715 I915_WRITE(FW_BLC, fwater_lo);
2613} 2716}
@@ -2661,11 +2764,11 @@ static void intel_update_watermarks(struct drm_device *dev)
2661 if (crtc->enabled) { 2764 if (crtc->enabled) {
2662 enabled++; 2765 enabled++;
2663 if (intel_crtc->plane == 0) { 2766 if (intel_crtc->plane == 0) {
2664 DRM_DEBUG("plane A (pipe %d) clock: %d\n", 2767 DRM_DEBUG_KMS("plane A (pipe %d) clock: %d\n",
2665 intel_crtc->pipe, crtc->mode.clock); 2768 intel_crtc->pipe, crtc->mode.clock);
2666 planea_clock = crtc->mode.clock; 2769 planea_clock = crtc->mode.clock;
2667 } else { 2770 } else {
2668 DRM_DEBUG("plane B (pipe %d) clock: %d\n", 2771 DRM_DEBUG_KMS("plane B (pipe %d) clock: %d\n",
2669 intel_crtc->pipe, crtc->mode.clock); 2772 intel_crtc->pipe, crtc->mode.clock);
2670 planeb_clock = crtc->mode.clock; 2773 planeb_clock = crtc->mode.clock;
2671 } 2774 }
@@ -2682,10 +2785,10 @@ static void intel_update_watermarks(struct drm_device *dev)
2682 return; 2785 return;
2683 2786
2684 /* Single plane configs can enable self refresh */ 2787 /* Single plane configs can enable self refresh */
2685 if (enabled == 1 && IS_IGD(dev)) 2788 if (enabled == 1 && IS_PINEVIEW(dev))
2686 igd_enable_cxsr(dev, sr_clock, pixel_size); 2789 pineview_enable_cxsr(dev, sr_clock, pixel_size);
2687 else if (IS_IGD(dev)) 2790 else if (IS_PINEVIEW(dev))
2688 igd_disable_cxsr(dev); 2791 pineview_disable_cxsr(dev);
2689 2792
2690 dev_priv->display.update_wm(dev, planea_clock, planeb_clock, 2793 dev_priv->display.update_wm(dev, planea_clock, planeb_clock,
2691 sr_hdisplay, pixel_size); 2794 sr_hdisplay, pixel_size);
@@ -2779,10 +2882,11 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2779 2882
2780 if (is_lvds && dev_priv->lvds_use_ssc && num_outputs < 2) { 2883 if (is_lvds && dev_priv->lvds_use_ssc && num_outputs < 2) {
2781 refclk = dev_priv->lvds_ssc_freq * 1000; 2884 refclk = dev_priv->lvds_ssc_freq * 1000;
2782 DRM_DEBUG("using SSC reference clock of %d MHz\n", refclk / 1000); 2885 DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
2886 refclk / 1000);
2783 } else if (IS_I9XX(dev)) { 2887 } else if (IS_I9XX(dev)) {
2784 refclk = 96000; 2888 refclk = 96000;
2785 if (IS_IGDNG(dev)) 2889 if (IS_IRONLAKE(dev))
2786 refclk = 120000; /* 120MHz refclk */ 2890 refclk = 120000; /* 120MHz refclk */
2787 } else { 2891 } else {
2788 refclk = 48000; 2892 refclk = 48000;
@@ -2802,14 +2906,25 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2802 return -EINVAL; 2906 return -EINVAL;
2803 } 2907 }
2804 2908
2805 if (limit->find_reduced_pll && dev_priv->lvds_downclock_avail) { 2909 if (is_lvds && limit->find_reduced_pll &&
2910 dev_priv->lvds_downclock_avail) {
2806 memcpy(&reduced_clock, &clock, sizeof(intel_clock_t)); 2911 memcpy(&reduced_clock, &clock, sizeof(intel_clock_t));
2807 has_reduced_clock = limit->find_reduced_pll(limit, crtc, 2912 has_reduced_clock = limit->find_reduced_pll(limit, crtc,
2808 (adjusted_mode->clock*3/4), 2913 dev_priv->lvds_downclock,
2809 refclk, 2914 refclk,
2810 &reduced_clock); 2915 &reduced_clock);
2916 if (has_reduced_clock && (clock.p != reduced_clock.p)) {
2917 /*
2918 * If the different P is found, it means that we can't
2919 * switch the display clock by using the FP0/FP1.
2920 * In such case we will disable the LVDS downclock
2921 * feature.
2922 */
2923 DRM_DEBUG_KMS("Different P is found for "
2924 "LVDS clock/downclock\n");
2925 has_reduced_clock = 0;
2926 }
2811 } 2927 }
2812
2813 /* SDVO TV has fixed PLL values that depend on its clock range, 2928
2814 this mirrors the VBIOS setting. */ 2929
2815 if (is_sdvo && is_tv) { 2930 if (is_sdvo && is_tv) {
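About the reduced-clock P comparison earlier in this hunk: the FP0/FP1 register pair only holds the N/M divisors (see intel_crtc_clock_get further down, which reads N, M1 and M2 out of FPA0/FPA1 but P1 out of the DPLL register), so a runtime downclock can only swap between divisor sets that share the same P. The later LVDS helpers do exactly that by flipping the select bit, roughly:

	dpll = I915_READ(dpll_reg);
	I915_WRITE(dpll_reg, dpll | DISPLAY_RATE_SELECT_FPA1);	/* downclock: FP1 */
	/* ...and back to the full clock: */
	I915_WRITE(dpll_reg, dpll & ~DISPLAY_RATE_SELECT_FPA1);	/* upclock: FP0 */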
@@ -2831,7 +2946,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2831 } 2946 }
2832 2947
2833 /* FDI link */ 2948 /* FDI link */
2834 if (IS_IGDNG(dev)) { 2949 if (IS_IRONLAKE(dev)) {
2835 int lane, link_bw, bpp; 2950 int lane, link_bw, bpp;
2836 /* eDP doesn't require FDI link, so just set DP M/N 2951 /* eDP doesn't require FDI link, so just set DP M/N
2837 according to current link config */ 2952 according to current link config */
@@ -2873,8 +2988,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2873 bpp = 24; 2988 bpp = 24;
2874 } 2989 }
2875 2990
2876 igdng_compute_m_n(bpp, lane, target_clock, 2991 ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n);
2877 link_bw, &m_n);
2878 } 2992 }
2879 2993
2880 /* Ironlake: try to setup display ref clock before DPLL 2994 /* Ironlake: try to setup display ref clock before DPLL
@@ -2882,7 +2996,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2882 * PCH B stepping, previous chipset stepping should be 2996 * PCH B stepping, previous chipset stepping should be
2883 * ignoring this setting. 2997 * ignoring this setting.
2884 */ 2998 */
2885 if (IS_IGDNG(dev)) { 2999 if (IS_IRONLAKE(dev)) {
2886 temp = I915_READ(PCH_DREF_CONTROL); 3000 temp = I915_READ(PCH_DREF_CONTROL);
2887 /* Always enable nonspread source */ 3001 /* Always enable nonspread source */
2888 temp &= ~DREF_NONSPREAD_SOURCE_MASK; 3002 temp &= ~DREF_NONSPREAD_SOURCE_MASK;
@@ -2917,7 +3031,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2917 } 3031 }
2918 } 3032 }
2919 3033
2920 if (IS_IGD(dev)) { 3034 if (IS_PINEVIEW(dev)) {
2921 fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2; 3035 fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2;
2922 if (has_reduced_clock) 3036 if (has_reduced_clock)
2923 fp2 = (1 << reduced_clock.n) << 16 | 3037 fp2 = (1 << reduced_clock.n) << 16 |
@@ -2929,7 +3043,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2929 reduced_clock.m2; 3043 reduced_clock.m2;
2930 } 3044 }
2931 3045
2932 if (!IS_IGDNG(dev)) 3046 if (!IS_IRONLAKE(dev))
2933 dpll = DPLL_VGA_MODE_DIS; 3047 dpll = DPLL_VGA_MODE_DIS;
2934 3048
2935 if (IS_I9XX(dev)) { 3049 if (IS_I9XX(dev)) {
@@ -2942,19 +3056,19 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2942 sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; 3056 sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
2943 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) 3057 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
2944 dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES; 3058 dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
2945 else if (IS_IGDNG(dev)) 3059 else if (IS_IRONLAKE(dev))
2946 dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; 3060 dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
2947 } 3061 }
2948 if (is_dp) 3062 if (is_dp)
2949 dpll |= DPLL_DVO_HIGH_SPEED; 3063 dpll |= DPLL_DVO_HIGH_SPEED;
2950 3064
2951 /* compute bitmask from p1 value */ 3065 /* compute bitmask from p1 value */
2952 if (IS_IGD(dev)) 3066 if (IS_PINEVIEW(dev))
2953 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_IGD; 3067 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
2954 else { 3068 else {
2955 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 3069 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
2956 /* also FPA1 */ 3070 /* also FPA1 */
2957 if (IS_IGDNG(dev)) 3071 if (IS_IRONLAKE(dev))
2958 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; 3072 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
2959 if (IS_G4X(dev) && has_reduced_clock) 3073 if (IS_G4X(dev) && has_reduced_clock)
2960 dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; 3074 dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
@@ -2973,7 +3087,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2973 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; 3087 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
2974 break; 3088 break;
2975 } 3089 }
2976 if (IS_I965G(dev) && !IS_IGDNG(dev)) 3090 if (IS_I965G(dev) && !IS_IRONLAKE(dev))
2977 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT); 3091 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
2978 } else { 3092 } else {
2979 if (is_lvds) { 3093 if (is_lvds) {
@@ -3005,9 +3119,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3005 /* Set up the display plane register */ 3119 /* Set up the display plane register */
3006 dspcntr = DISPPLANE_GAMMA_ENABLE; 3120 dspcntr = DISPPLANE_GAMMA_ENABLE;
3007 3121
3008 /* IGDNG's plane is forced to pipe, bit 24 is to 3122 /* Ironlake's plane is forced to pipe, bit 24 is to
3009 enable color space conversion */ 3123 enable color space conversion */
3010 if (!IS_IGDNG(dev)) { 3124 if (!IS_IRONLAKE(dev)) {
3011 if (pipe == 0) 3125 if (pipe == 0)
3012 dspcntr &= ~DISPPLANE_SEL_PIPE_MASK; 3126 dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
3013 else 3127 else
@@ -3034,20 +3148,20 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3034 3148
3035 3149
3036 /* Disable the panel fitter if it was on our pipe */ 3150 /* Disable the panel fitter if it was on our pipe */
3037 if (!IS_IGDNG(dev) && intel_panel_fitter_pipe(dev) == pipe) 3151 if (!IS_IRONLAKE(dev) && intel_panel_fitter_pipe(dev) == pipe)
3038 I915_WRITE(PFIT_CONTROL, 0); 3152 I915_WRITE(PFIT_CONTROL, 0);
3039 3153
3040 DRM_DEBUG("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B'); 3154 DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
3041 drm_mode_debug_printmodeline(mode); 3155 drm_mode_debug_printmodeline(mode);
3042 3156
3043 /* assign to IGDNG registers */ 3157 /* assign to Ironlake registers */
3044 if (IS_IGDNG(dev)) { 3158 if (IS_IRONLAKE(dev)) {
3045 fp_reg = pch_fp_reg; 3159 fp_reg = pch_fp_reg;
3046 dpll_reg = pch_dpll_reg; 3160 dpll_reg = pch_dpll_reg;
3047 } 3161 }
3048 3162
3049 if (is_edp) { 3163 if (is_edp) {
3050 igdng_disable_pll_edp(crtc); 3164 ironlake_disable_pll_edp(crtc);
3051 } else if ((dpll & DPLL_VCO_ENABLE)) { 3165 } else if ((dpll & DPLL_VCO_ENABLE)) {
3052 I915_WRITE(fp_reg, fp); 3166 I915_WRITE(fp_reg, fp);
3053 I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE); 3167 I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
@@ -3062,7 +3176,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3062 if (is_lvds) { 3176 if (is_lvds) {
3063 u32 lvds; 3177 u32 lvds;
3064 3178
3065 if (IS_IGDNG(dev)) 3179 if (IS_IRONLAKE(dev))
3066 lvds_reg = PCH_LVDS; 3180 lvds_reg = PCH_LVDS;
3067 3181
3068 lvds = I915_READ(lvds_reg); 3182 lvds = I915_READ(lvds_reg);
@@ -3095,7 +3209,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3095 /* Wait for the clocks to stabilize. */ 3209 /* Wait for the clocks to stabilize. */
3096 udelay(150); 3210 udelay(150);
3097 3211
3098 if (IS_I965G(dev) && !IS_IGDNG(dev)) { 3212 if (IS_I965G(dev) && !IS_IRONLAKE(dev)) {
3099 if (is_sdvo) { 3213 if (is_sdvo) {
3100 sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; 3214 sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
3101 I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | 3215 I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) |
@@ -3115,14 +3229,14 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3115 I915_WRITE(fp_reg + 4, fp2); 3229 I915_WRITE(fp_reg + 4, fp2);
3116 intel_crtc->lowfreq_avail = true; 3230 intel_crtc->lowfreq_avail = true;
3117 if (HAS_PIPE_CXSR(dev)) { 3231 if (HAS_PIPE_CXSR(dev)) {
3118 DRM_DEBUG("enabling CxSR downclocking\n"); 3232 DRM_DEBUG_KMS("enabling CxSR downclocking\n");
3119 pipeconf |= PIPECONF_CXSR_DOWNCLOCK; 3233 pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
3120 } 3234 }
3121 } else { 3235 } else {
3122 I915_WRITE(fp_reg + 4, fp); 3236 I915_WRITE(fp_reg + 4, fp);
3123 intel_crtc->lowfreq_avail = false; 3237 intel_crtc->lowfreq_avail = false;
3124 if (HAS_PIPE_CXSR(dev)) { 3238 if (HAS_PIPE_CXSR(dev)) {
3125 DRM_DEBUG("disabling CxSR downclocking\n"); 3239 DRM_DEBUG_KMS("disabling CxSR downclocking\n");
3126 pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK; 3240 pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
3127 } 3241 }
3128 } 3242 }
@@ -3142,21 +3256,21 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3142 /* pipesrc and dspsize control the size that is scaled from, which should 3256 /* pipesrc and dspsize control the size that is scaled from, which should
3143 * always be the user's requested size. 3257 * always be the user's requested size.
3144 */ 3258 */
3145 if (!IS_IGDNG(dev)) { 3259 if (!IS_IRONLAKE(dev)) {
3146 I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) | 3260 I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) |
3147 (mode->hdisplay - 1)); 3261 (mode->hdisplay - 1));
3148 I915_WRITE(dsppos_reg, 0); 3262 I915_WRITE(dsppos_reg, 0);
3149 } 3263 }
3150 I915_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); 3264 I915_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
3151 3265
3152 if (IS_IGDNG(dev)) { 3266 if (IS_IRONLAKE(dev)) {
3153 I915_WRITE(data_m1_reg, TU_SIZE(m_n.tu) | m_n.gmch_m); 3267 I915_WRITE(data_m1_reg, TU_SIZE(m_n.tu) | m_n.gmch_m);
3154 I915_WRITE(data_n1_reg, TU_SIZE(m_n.tu) | m_n.gmch_n); 3268 I915_WRITE(data_n1_reg, TU_SIZE(m_n.tu) | m_n.gmch_n);
3155 I915_WRITE(link_m1_reg, m_n.link_m); 3269 I915_WRITE(link_m1_reg, m_n.link_m);
3156 I915_WRITE(link_n1_reg, m_n.link_n); 3270 I915_WRITE(link_n1_reg, m_n.link_n);
3157 3271
3158 if (is_edp) { 3272 if (is_edp) {
3159 igdng_set_pll_edp(crtc, adjusted_mode->clock); 3273 ironlake_set_pll_edp(crtc, adjusted_mode->clock);
3160 } else { 3274 } else {
3161 /* enable FDI RX PLL too */ 3275 /* enable FDI RX PLL too */
3162 temp = I915_READ(fdi_rx_reg); 3276 temp = I915_READ(fdi_rx_reg);
@@ -3170,7 +3284,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3170 3284
3171 intel_wait_for_vblank(dev); 3285 intel_wait_for_vblank(dev);
3172 3286
3173 if (IS_IGDNG(dev)) { 3287 if (IS_IRONLAKE(dev)) {
3174 /* enable address swizzle for tiling buffer */ 3288 /* enable address swizzle for tiling buffer */
3175 temp = I915_READ(DISP_ARB_CTL); 3289 temp = I915_READ(DISP_ARB_CTL);
3176 I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING); 3290 I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING);
@@ -3204,8 +3318,8 @@ void intel_crtc_load_lut(struct drm_crtc *crtc)
3204 if (!crtc->enabled) 3318 if (!crtc->enabled)
3205 return; 3319 return;
3206 3320
3207 /* use legacy palette for IGDNG */ 3321 /* use legacy palette for Ironlake */
3208 if (IS_IGDNG(dev)) 3322 if (IS_IRONLAKE(dev))
3209 palreg = (intel_crtc->pipe == 0) ? LGC_PALETTE_A : 3323 palreg = (intel_crtc->pipe == 0) ? LGC_PALETTE_A :
3210 LGC_PALETTE_B; 3324 LGC_PALETTE_B;
3211 3325
@@ -3234,11 +3348,11 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
3234 size_t addr; 3348 size_t addr;
3235 int ret; 3349 int ret;
3236 3350
3237 DRM_DEBUG("\n"); 3351 DRM_DEBUG_KMS("\n");
3238 3352
3239 /* if we want to turn off the cursor ignore width and height */ 3353 /* if we want to turn off the cursor ignore width and height */
3240 if (!handle) { 3354 if (!handle) {
3241 DRM_DEBUG("cursor off\n"); 3355 DRM_DEBUG_KMS("cursor off\n");
3242 if (IS_MOBILE(dev) || IS_I9XX(dev)) { 3356 if (IS_MOBILE(dev) || IS_I9XX(dev)) {
3243 temp &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE); 3357 temp &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
3244 temp |= CURSOR_MODE_DISABLE; 3358 temp |= CURSOR_MODE_DISABLE;
@@ -3546,18 +3660,18 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
3546 fp = I915_READ((pipe == 0) ? FPA1 : FPB1); 3660 fp = I915_READ((pipe == 0) ? FPA1 : FPB1);
3547 3661
3548 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; 3662 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
3549 if (IS_IGD(dev)) { 3663 if (IS_PINEVIEW(dev)) {
3550 clock.n = ffs((fp & FP_N_IGD_DIV_MASK) >> FP_N_DIV_SHIFT) - 1; 3664 clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
3551 clock.m2 = (fp & FP_M2_IGD_DIV_MASK) >> FP_M2_DIV_SHIFT; 3665 clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
3552 } else { 3666 } else {
3553 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT; 3667 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
3554 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT; 3668 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
3555 } 3669 }
3556 3670
3557 if (IS_I9XX(dev)) { 3671 if (IS_I9XX(dev)) {
3558 if (IS_IGD(dev)) 3672 if (IS_PINEVIEW(dev))
3559 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_IGD) >> 3673 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
3560 DPLL_FPA01_P1_POST_DIV_SHIFT_IGD); 3674 DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
3561 else 3675 else
3562 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >> 3676 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
3563 DPLL_FPA01_P1_POST_DIV_SHIFT); 3677 DPLL_FPA01_P1_POST_DIV_SHIFT);
@@ -3572,7 +3686,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
3572 7 : 14; 3686 7 : 14;
3573 break; 3687 break;
3574 default: 3688 default:
3575 DRM_DEBUG("Unknown DPLL mode %08x in programmed " 3689 DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
3576 "mode\n", (int)(dpll & DPLL_MODE_MASK)); 3690 "mode\n", (int)(dpll & DPLL_MODE_MASK));
3577 return 0; 3691 return 0;
3578 } 3692 }
@@ -3658,7 +3772,7 @@ static void intel_gpu_idle_timer(unsigned long arg)
3658 struct drm_device *dev = (struct drm_device *)arg; 3772 struct drm_device *dev = (struct drm_device *)arg;
3659 drm_i915_private_t *dev_priv = dev->dev_private; 3773 drm_i915_private_t *dev_priv = dev->dev_private;
3660 3774
3661 DRM_DEBUG("idle timer fired, downclocking\n"); 3775 DRM_DEBUG_DRIVER("idle timer fired, downclocking\n");
3662 3776
3663 dev_priv->busy = false; 3777 dev_priv->busy = false;
3664 3778
@@ -3669,11 +3783,11 @@ void intel_increase_renderclock(struct drm_device *dev, bool schedule)
3669{ 3783{
3670 drm_i915_private_t *dev_priv = dev->dev_private; 3784 drm_i915_private_t *dev_priv = dev->dev_private;
3671 3785
3672 if (IS_IGDNG(dev)) 3786 if (IS_IRONLAKE(dev))
3673 return; 3787 return;
3674 3788
3675 if (!dev_priv->render_reclock_avail) { 3789 if (!dev_priv->render_reclock_avail) {
3676 DRM_DEBUG("not reclocking render clock\n"); 3790 DRM_DEBUG_DRIVER("not reclocking render clock\n");
3677 return; 3791 return;
3678 } 3792 }
3679 3793
@@ -3682,7 +3796,7 @@ void intel_increase_renderclock(struct drm_device *dev, bool schedule)
3682 pci_write_config_word(dev->pdev, GCFGC, dev_priv->orig_clock); 3796 pci_write_config_word(dev->pdev, GCFGC, dev_priv->orig_clock);
3683 else if (IS_I85X(dev)) 3797 else if (IS_I85X(dev))
3684 pci_write_config_word(dev->pdev, HPLLCC, dev_priv->orig_clock); 3798 pci_write_config_word(dev->pdev, HPLLCC, dev_priv->orig_clock);
3685 DRM_DEBUG("increasing render clock frequency\n"); 3799 DRM_DEBUG_DRIVER("increasing render clock frequency\n");
3686 3800
3687 /* Schedule downclock */ 3801 /* Schedule downclock */
3688 if (schedule) 3802 if (schedule)
@@ -3694,11 +3808,11 @@ void intel_decrease_renderclock(struct drm_device *dev)
3694{ 3808{
3695 drm_i915_private_t *dev_priv = dev->dev_private; 3809 drm_i915_private_t *dev_priv = dev->dev_private;
3696 3810
3697 if (IS_IGDNG(dev)) 3811 if (IS_IRONLAKE(dev))
3698 return; 3812 return;
3699 3813
3700 if (!dev_priv->render_reclock_avail) { 3814 if (!dev_priv->render_reclock_avail) {
3701 DRM_DEBUG("not reclocking render clock\n"); 3815 DRM_DEBUG_DRIVER("not reclocking render clock\n");
3702 return; 3816 return;
3703 } 3817 }
3704 3818
@@ -3758,7 +3872,7 @@ void intel_decrease_renderclock(struct drm_device *dev)
3758 3872
3759 pci_write_config_word(dev->pdev, HPLLCC, hpllcc); 3873 pci_write_config_word(dev->pdev, HPLLCC, hpllcc);
3760 } 3874 }
3761 DRM_DEBUG("decreasing render clock frequency\n"); 3875 DRM_DEBUG_DRIVER("decreasing render clock frequency\n");
3762} 3876}
3763 3877
3764/* Note that no increase function is needed for this - increase_renderclock() 3878/* Note that no increase function is needed for this - increase_renderclock()
@@ -3766,7 +3880,7 @@ void intel_decrease_renderclock(struct drm_device *dev)
3766 */ 3880 */
3767void intel_decrease_displayclock(struct drm_device *dev) 3881void intel_decrease_displayclock(struct drm_device *dev)
3768{ 3882{
3769 if (IS_IGDNG(dev)) 3883 if (IS_IRONLAKE(dev))
3770 return; 3884 return;
3771 3885
3772 if (IS_I945G(dev) || IS_I945GM(dev) || IS_I915G(dev) || 3886 if (IS_I945G(dev) || IS_I945GM(dev) || IS_I915G(dev) ||
@@ -3792,7 +3906,7 @@ static void intel_crtc_idle_timer(unsigned long arg)
3792 struct drm_crtc *crtc = &intel_crtc->base; 3906 struct drm_crtc *crtc = &intel_crtc->base;
3793 drm_i915_private_t *dev_priv = crtc->dev->dev_private; 3907 drm_i915_private_t *dev_priv = crtc->dev->dev_private;
3794 3908
3795 DRM_DEBUG("idle timer fired, downclocking\n"); 3909 DRM_DEBUG_DRIVER("idle timer fired, downclocking\n");
3796 3910
3797 intel_crtc->busy = false; 3911 intel_crtc->busy = false;
3798 3912
@@ -3808,14 +3922,14 @@ static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule)
3808 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; 3922 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
3809 int dpll = I915_READ(dpll_reg); 3923 int dpll = I915_READ(dpll_reg);
3810 3924
3811 if (IS_IGDNG(dev)) 3925 if (IS_IRONLAKE(dev))
3812 return; 3926 return;
3813 3927
3814 if (!dev_priv->lvds_downclock_avail) 3928 if (!dev_priv->lvds_downclock_avail)
3815 return; 3929 return;
3816 3930
3817 if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) { 3931 if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
3818 DRM_DEBUG("upclocking LVDS\n"); 3932 DRM_DEBUG_DRIVER("upclocking LVDS\n");
3819 3933
3820 /* Unlock panel regs */ 3934 /* Unlock panel regs */
3821 I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | (0xabcd << 16)); 3935 I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | (0xabcd << 16));
@@ -3826,7 +3940,7 @@ static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule)
3826 intel_wait_for_vblank(dev); 3940 intel_wait_for_vblank(dev);
3827 dpll = I915_READ(dpll_reg); 3941 dpll = I915_READ(dpll_reg);
3828 if (dpll & DISPLAY_RATE_SELECT_FPA1) 3942 if (dpll & DISPLAY_RATE_SELECT_FPA1)
3829 DRM_DEBUG("failed to upclock LVDS!\n"); 3943 DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
3830 3944
3831 /* ...and lock them again */ 3945 /* ...and lock them again */
3832 I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3); 3946 I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3);
@@ -3847,7 +3961,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
3847 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; 3961 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
3848 int dpll = I915_READ(dpll_reg); 3962 int dpll = I915_READ(dpll_reg);
3849 3963
3850 if (IS_IGDNG(dev)) 3964 if (IS_IRONLAKE(dev))
3851 return; 3965 return;
3852 3966
3853 if (!dev_priv->lvds_downclock_avail) 3967 if (!dev_priv->lvds_downclock_avail)
@@ -3858,7 +3972,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
3858 * the manual case. 3972 * the manual case.
3859 */ 3973 */
3860 if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) { 3974 if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
3861 DRM_DEBUG("downclocking LVDS\n"); 3975 DRM_DEBUG_DRIVER("downclocking LVDS\n");
3862 3976
3863 /* Unlock panel regs */ 3977 /* Unlock panel regs */
3864 I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | (0xabcd << 16)); 3978 I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | (0xabcd << 16));
@@ -3869,7 +3983,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
3869 intel_wait_for_vblank(dev); 3983 intel_wait_for_vblank(dev);
3870 dpll = I915_READ(dpll_reg); 3984 dpll = I915_READ(dpll_reg);
3871 if (!(dpll & DISPLAY_RATE_SELECT_FPA1)) 3985 if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
3872 DRM_DEBUG("failed to downclock LVDS!\n"); 3986 DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
3873 3987
3874 /* ...and lock them again */ 3988 /* ...and lock them again */
3875 I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3); 3989 I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3);
@@ -3936,8 +4050,13 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
3936 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 4050 if (!drm_core_check_feature(dev, DRIVER_MODESET))
3937 return; 4051 return;
3938 4052
3939 dev_priv->busy = true; 4053 if (!dev_priv->busy) {
3940 intel_increase_renderclock(dev, true); 4054 dev_priv->busy = true;
4055 intel_increase_renderclock(dev, true);
4056 } else {
4057 mod_timer(&dev_priv->idle_timer, jiffies +
4058 msecs_to_jiffies(GPU_IDLE_TIMEOUT));
4059 }
3941 4060
3942 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 4061 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3943 if (!crtc->fb) 4062 if (!crtc->fb)
@@ -3967,6 +4086,158 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
3967 kfree(intel_crtc); 4086 kfree(intel_crtc);
3968} 4087}
3969 4088
4089struct intel_unpin_work {
4090 struct work_struct work;
4091 struct drm_device *dev;
4092 struct drm_gem_object *obj;
4093 struct drm_pending_vblank_event *event;
4094 int pending;
4095};
4096
4097static void intel_unpin_work_fn(struct work_struct *__work)
4098{
4099 struct intel_unpin_work *work =
4100 container_of(__work, struct intel_unpin_work, work);
4101
4102 mutex_lock(&work->dev->struct_mutex);
4103 i915_gem_object_unpin(work->obj);
4104 drm_gem_object_unreference(work->obj);
4105 mutex_unlock(&work->dev->struct_mutex);
4106 kfree(work);
4107}
4108
4109void intel_finish_page_flip(struct drm_device *dev, int pipe)
4110{
4111 drm_i915_private_t *dev_priv = dev->dev_private;
4112 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
4113 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4114 struct intel_unpin_work *work;
4115 struct drm_i915_gem_object *obj_priv;
4116 struct drm_pending_vblank_event *e;
4117 struct timeval now;
4118 unsigned long flags;
4119
4120 /* Ignore early vblank irqs */
4121 if (intel_crtc == NULL)
4122 return;
4123
4124 spin_lock_irqsave(&dev->event_lock, flags);
4125 work = intel_crtc->unpin_work;
4126 if (work == NULL || !work->pending) {
4127 spin_unlock_irqrestore(&dev->event_lock, flags);
4128 return;
4129 }
4130
4131 intel_crtc->unpin_work = NULL;
4132 drm_vblank_put(dev, intel_crtc->pipe);
4133
4134 if (work->event) {
4135 e = work->event;
4136 do_gettimeofday(&now);
4137 e->event.sequence = drm_vblank_count(dev, intel_crtc->pipe);
4138 e->event.tv_sec = now.tv_sec;
4139 e->event.tv_usec = now.tv_usec;
4140 list_add_tail(&e->base.link,
4141 &e->base.file_priv->event_list);
4142 wake_up_interruptible(&e->base.file_priv->event_wait);
4143 }
4144
4145 spin_unlock_irqrestore(&dev->event_lock, flags);
4146
4147 obj_priv = work->obj->driver_private;
4148 if (atomic_dec_and_test(&obj_priv->pending_flip))
4149 DRM_WAKEUP(&dev_priv->pending_flip_queue);
4150 schedule_work(&work->work);
4151}
4152
4153void intel_prepare_page_flip(struct drm_device *dev, int plane)
4154{
4155 drm_i915_private_t *dev_priv = dev->dev_private;
4156 struct intel_crtc *intel_crtc =
4157 to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
4158 unsigned long flags;
4159
4160 spin_lock_irqsave(&dev->event_lock, flags);
4161 if (intel_crtc->unpin_work)
4162 intel_crtc->unpin_work->pending = 1;
4163 spin_unlock_irqrestore(&dev->event_lock, flags);
4164}
4165
4166static int intel_crtc_page_flip(struct drm_crtc *crtc,
4167 struct drm_framebuffer *fb,
4168 struct drm_pending_vblank_event *event)
4169{
4170 struct drm_device *dev = crtc->dev;
4171 struct drm_i915_private *dev_priv = dev->dev_private;
4172 struct intel_framebuffer *intel_fb;
4173 struct drm_i915_gem_object *obj_priv;
4174 struct drm_gem_object *obj;
4175 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4176 struct intel_unpin_work *work;
4177 unsigned long flags;
4178 int ret;
4179 RING_LOCALS;
4180
4181 work = kzalloc(sizeof *work, GFP_KERNEL);
4182 if (work == NULL)
4183 return -ENOMEM;
4184
4185 mutex_lock(&dev->struct_mutex);
4186
4187 work->event = event;
4188 work->dev = crtc->dev;
4189 intel_fb = to_intel_framebuffer(crtc->fb);
4190 work->obj = intel_fb->obj;
4191 INIT_WORK(&work->work, intel_unpin_work_fn);
4192
4193 /* We borrow the event spin lock for protecting unpin_work */
4194 spin_lock_irqsave(&dev->event_lock, flags);
4195 if (intel_crtc->unpin_work) {
4196 spin_unlock_irqrestore(&dev->event_lock, flags);
4197 kfree(work);
4198 mutex_unlock(&dev->struct_mutex);
4199 return -EBUSY;
4200 }
4201 intel_crtc->unpin_work = work;
4202 spin_unlock_irqrestore(&dev->event_lock, flags);
4203
4204 intel_fb = to_intel_framebuffer(fb);
4205 obj = intel_fb->obj;
4206
4207 ret = intel_pin_and_fence_fb_obj(dev, obj);
4208 if (ret != 0) {
4209 kfree(work);
4210 mutex_unlock(&dev->struct_mutex);
4211 return ret;
4212 }
4213
4214 /* Reference the old fb object for the scheduled work. */
4215 drm_gem_object_reference(work->obj);
4216
4217 crtc->fb = fb;
4218 i915_gem_object_flush_write_domain(obj);
4219 drm_vblank_get(dev, intel_crtc->pipe);
4220 obj_priv = obj->driver_private;
4221 atomic_inc(&obj_priv->pending_flip);
4222
4223 BEGIN_LP_RING(4);
4224 OUT_RING(MI_DISPLAY_FLIP |
4225 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
4226 OUT_RING(fb->pitch);
4227 if (IS_I965G(dev)) {
4228 OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode);
4229 OUT_RING((fb->width << 16) | fb->height);
4230 } else {
4231 OUT_RING(obj_priv->gtt_offset);
4232 OUT_RING(MI_NOOP);
4233 }
4234 ADVANCE_LP_RING();
4235
4236 mutex_unlock(&dev->struct_mutex);
4237
4238 return 0;
4239}
4240
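intel_prepare_page_flip() and intel_finish_page_flip() are hooks for the interrupt handler; the assumed pairing in i915_irq.c (pipe/plane A shown, flag names as in i915_reg.h) looks roughly like:

	/* sketch: the flip-pending interrupt arms the flip... */
	if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
		intel_prepare_page_flip(dev, 0);
	/* ...and the next vblank on that pipe completes and unpins it */
	if (pipea_stats & PIPE_VBLANK_INTERRUPT_STATUS)
		intel_finish_page_flip(dev, 0);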
3970static const struct drm_crtc_helper_funcs intel_helper_funcs = { 4241static const struct drm_crtc_helper_funcs intel_helper_funcs = {
3971 .dpms = intel_crtc_dpms, 4242 .dpms = intel_crtc_dpms,
3972 .mode_fixup = intel_crtc_mode_fixup, 4243 .mode_fixup = intel_crtc_mode_fixup,
@@ -3983,11 +4254,13 @@ static const struct drm_crtc_funcs intel_crtc_funcs = {
3983 .gamma_set = intel_crtc_gamma_set, 4254 .gamma_set = intel_crtc_gamma_set,
3984 .set_config = drm_crtc_helper_set_config, 4255 .set_config = drm_crtc_helper_set_config,
3985 .destroy = intel_crtc_destroy, 4256 .destroy = intel_crtc_destroy,
4257 .page_flip = intel_crtc_page_flip,
3986}; 4258};
3987 4259
3988 4260
3989static void intel_crtc_init(struct drm_device *dev, int pipe) 4261static void intel_crtc_init(struct drm_device *dev, int pipe)
3990{ 4262{
4263 drm_i915_private_t *dev_priv = dev->dev_private;
3991 struct intel_crtc *intel_crtc; 4264 struct intel_crtc *intel_crtc;
3992 int i; 4265 int i;
3993 4266
@@ -4010,10 +4283,15 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
4010 intel_crtc->pipe = pipe; 4283 intel_crtc->pipe = pipe;
4011 intel_crtc->plane = pipe; 4284 intel_crtc->plane = pipe;
4012 if (IS_MOBILE(dev) && (IS_I9XX(dev) && !IS_I965G(dev))) { 4285 if (IS_MOBILE(dev) && (IS_I9XX(dev) && !IS_I965G(dev))) {
4013 DRM_DEBUG("swapping pipes & planes for FBC\n"); 4286 DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
4014 intel_crtc->plane = ((pipe == 0) ? 1 : 0); 4287 intel_crtc->plane = ((pipe == 0) ? 1 : 0);
4015 } 4288 }
4016 4289
4290 BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
4291 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
4292 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
4293 dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
4294
4017 intel_crtc->cursor_addr = 0; 4295 intel_crtc->cursor_addr = 0;
4018 intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF; 4296 intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
4019 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); 4297 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
@@ -4090,7 +4368,7 @@ static void intel_setup_outputs(struct drm_device *dev)
4090 if (IS_MOBILE(dev) && !IS_I830(dev)) 4368 if (IS_MOBILE(dev) && !IS_I830(dev))
4091 intel_lvds_init(dev); 4369 intel_lvds_init(dev);
4092 4370
4093 if (IS_IGDNG(dev)) { 4371 if (IS_IRONLAKE(dev)) {
4094 int found; 4372 int found;
4095 4373
4096 if (IS_MOBILE(dev) && (I915_READ(DP_A) & DP_DETECTED)) 4374 if (IS_MOBILE(dev) && (I915_READ(DP_A) & DP_DETECTED))
@@ -4118,7 +4396,7 @@ static void intel_setup_outputs(struct drm_device *dev)
4118 if (I915_READ(PCH_DP_D) & DP_DETECTED) 4396 if (I915_READ(PCH_DP_D) & DP_DETECTED)
4119 intel_dp_init(dev, PCH_DP_D); 4397 intel_dp_init(dev, PCH_DP_D);
4120 4398
4121 } else if (IS_I9XX(dev)) { 4399 } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
4122 bool found = false; 4400 bool found = false;
4123 4401
4124 if (I915_READ(SDVOB) & SDVO_DETECTED) { 4402 if (I915_READ(SDVOB) & SDVO_DETECTED) {
@@ -4145,10 +4423,10 @@ static void intel_setup_outputs(struct drm_device *dev)
4145 4423
4146 if (SUPPORTS_INTEGRATED_DP(dev) && (I915_READ(DP_D) & DP_DETECTED)) 4424 if (SUPPORTS_INTEGRATED_DP(dev) && (I915_READ(DP_D) & DP_DETECTED))
4147 intel_dp_init(dev, DP_D); 4425 intel_dp_init(dev, DP_D);
4148 } else 4426 } else if (IS_I8XX(dev))
4149 intel_dvo_init(dev); 4427 intel_dvo_init(dev);
4150 4428
4151 if (IS_I9XX(dev) && IS_MOBILE(dev) && !IS_IGDNG(dev)) 4429 if (SUPPORTS_TV(dev))
4152 intel_tv_init(dev); 4430 intel_tv_init(dev);
4153 4431
4154 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 4432 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
@@ -4257,7 +4535,7 @@ void intel_init_clock_gating(struct drm_device *dev)
4257 * Disable clock gating reported to work incorrectly according to the 4535 * Disable clock gating reported to work incorrectly according to the
4258 * specs, but enable as much else as we can. 4536 * specs, but enable as much else as we can.
4259 */ 4537 */
4260 if (IS_IGDNG(dev)) { 4538 if (IS_IRONLAKE(dev)) {
4261 return; 4539 return;
4262 } else if (IS_G4X(dev)) { 4540 } else if (IS_G4X(dev)) {
4263 uint32_t dspclk_gate; 4541 uint32_t dspclk_gate;
@@ -4291,11 +4569,52 @@ void intel_init_clock_gating(struct drm_device *dev)
4291 dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING | 4569 dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
4292 DSTATE_DOT_CLOCK_GATING; 4570 DSTATE_DOT_CLOCK_GATING;
4293 I915_WRITE(D_STATE, dstate); 4571 I915_WRITE(D_STATE, dstate);
4294 } else if (IS_I855(dev) || IS_I865G(dev)) { 4572 } else if (IS_I85X(dev) || IS_I865G(dev)) {
4295 I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE); 4573 I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
4296 } else if (IS_I830(dev)) { 4574 } else if (IS_I830(dev)) {
4297 I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE); 4575 I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
4298 } 4576 }
4577
4578 /*
4579 * GPU can automatically power down the render unit if given a page
4580 * to save state.
4581 */
4582 if (I915_HAS_RC6(dev)) {
4583 struct drm_gem_object *pwrctx;
4584 struct drm_i915_gem_object *obj_priv;
4585 int ret;
4586
4587 if (dev_priv->pwrctx) {
4588 obj_priv = dev_priv->pwrctx->driver_private;
4589 } else {
4590 pwrctx = drm_gem_object_alloc(dev, 4096);
4591 if (!pwrctx) {
4592 DRM_DEBUG("failed to alloc power context, "
4593 "RC6 disabled\n");
4594 goto out;
4595 }
4596
4597 ret = i915_gem_object_pin(pwrctx, 4096);
4598 if (ret) {
4599 DRM_ERROR("failed to pin power context: %d\n",
4600 ret);
4601 drm_gem_object_unreference(pwrctx);
4602 goto out;
4603 }
4604
4605 i915_gem_object_set_to_gtt_domain(pwrctx, 1);
4606
4607 dev_priv->pwrctx = pwrctx;
4608 obj_priv = pwrctx->driver_private;
4609 }
4610
4611 I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN);
4612 I915_WRITE(MCHBAR_RENDER_STANDBY,
4613 I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT);
4614 }
4615
4616out:
4617 return;
4299} 4618}
4300 4619
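A note on the PWRCTXA write above: the power context object is allocated and pinned at 4096 bytes, so its GTT offset is page aligned, which is what makes OR-ing PWRCTX_EN into the low bits safe. The invariant, spelled out as a sketch:

/* Sketch of the alignment invariant behind the PWRCTXA write above:
 * a 4KiB pin yields a page-aligned GTT offset, leaving the low bits
 * free for the enable flag.
 */
BUG_ON(obj_priv->gtt_offset & ~PAGE_MASK);	/* page aligned */
I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN);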
4301/* Set up chip specific display functions */ 4620/* Set up chip specific display functions */
@@ -4304,8 +4623,8 @@ static void intel_init_display(struct drm_device *dev)
4304 struct drm_i915_private *dev_priv = dev->dev_private; 4623 struct drm_i915_private *dev_priv = dev->dev_private;
4305 4624
4306 /* We always want a DPMS function */ 4625 /* We always want a DPMS function */
4307 if (IS_IGDNG(dev)) 4626 if (IS_IRONLAKE(dev))
4308 dev_priv->display.dpms = igdng_crtc_dpms; 4627 dev_priv->display.dpms = ironlake_crtc_dpms;
4309 else 4628 else
4310 dev_priv->display.dpms = i9xx_crtc_dpms; 4629 dev_priv->display.dpms = i9xx_crtc_dpms;
4311 4630
@@ -4324,13 +4643,13 @@ static void intel_init_display(struct drm_device *dev)
4324 } 4643 }
4325 4644
4326 /* Returns the core display clock speed */ 4645 /* Returns the core display clock speed */
4327 if (IS_I945G(dev)) 4646 if (IS_I945G(dev) || (IS_G33(dev) && ! IS_PINEVIEW_M(dev)))
4328 dev_priv->display.get_display_clock_speed = 4647 dev_priv->display.get_display_clock_speed =
4329 i945_get_display_clock_speed; 4648 i945_get_display_clock_speed;
4330 else if (IS_I915G(dev)) 4649 else if (IS_I915G(dev))
4331 dev_priv->display.get_display_clock_speed = 4650 dev_priv->display.get_display_clock_speed =
4332 i915_get_display_clock_speed; 4651 i915_get_display_clock_speed;
4333 else if (IS_I945GM(dev) || IS_845G(dev) || IS_IGDGM(dev)) 4652 else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev))
4334 dev_priv->display.get_display_clock_speed = 4653 dev_priv->display.get_display_clock_speed =
4335 i9xx_misc_get_display_clock_speed; 4654 i9xx_misc_get_display_clock_speed;
4336 else if (IS_I915GM(dev)) 4655 else if (IS_I915GM(dev))
@@ -4339,7 +4658,7 @@ static void intel_init_display(struct drm_device *dev)
4339 else if (IS_I865G(dev)) 4658 else if (IS_I865G(dev))
4340 dev_priv->display.get_display_clock_speed = 4659 dev_priv->display.get_display_clock_speed =
4341 i865_get_display_clock_speed; 4660 i865_get_display_clock_speed;
4342 else if (IS_I855(dev)) 4661 else if (IS_I85X(dev))
4343 dev_priv->display.get_display_clock_speed = 4662 dev_priv->display.get_display_clock_speed =
4344 i855_get_display_clock_speed; 4663 i855_get_display_clock_speed;
4345 else /* 852, 830 */ 4664 else /* 852, 830 */
@@ -4347,7 +4666,7 @@ static void intel_init_display(struct drm_device *dev)
4347 i830_get_display_clock_speed; 4666 i830_get_display_clock_speed;
4348 4667
4349 /* For FIFO watermark updates */ 4668 /* For FIFO watermark updates */
4350 if (IS_IGDNG(dev)) 4669 if (IS_IRONLAKE(dev))
4351 dev_priv->display.update_wm = NULL; 4670 dev_priv->display.update_wm = NULL;
4352 else if (IS_G4X(dev)) 4671 else if (IS_G4X(dev))
4353 dev_priv->display.update_wm = g4x_update_wm; 4672 dev_priv->display.update_wm = g4x_update_wm;
@@ -4403,7 +4722,7 @@ void intel_modeset_init(struct drm_device *dev)
4403 num_pipe = 2; 4722 num_pipe = 2;
4404 else 4723 else
4405 num_pipe = 1; 4724 num_pipe = 1;
4406 DRM_DEBUG("%d display pipe%s available.\n", 4725 DRM_DEBUG_KMS("%d display pipe%s available.\n",
4407 num_pipe, num_pipe > 1 ? "s" : ""); 4726 num_pipe, num_pipe > 1 ? "s" : "");
4408 4727
4409 if (IS_I85X(dev)) 4728 if (IS_I85X(dev))
@@ -4422,6 +4741,15 @@ void intel_modeset_init(struct drm_device *dev)
4422 INIT_WORK(&dev_priv->idle_work, intel_idle_update); 4741 INIT_WORK(&dev_priv->idle_work, intel_idle_update);
4423 setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer, 4742 setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
4424 (unsigned long)dev); 4743 (unsigned long)dev);
4744
4745 intel_setup_overlay(dev);
4746
4747 if (IS_PINEVIEW(dev) && !intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
4748 dev_priv->fsb_freq,
4749 dev_priv->mem_freq))
4750 DRM_INFO("failed to find known CxSR latency "
4751 "(found fsb freq %d, mem freq %d), disabling CxSR\n",
4752 dev_priv->fsb_freq, dev_priv->mem_freq);
4425} 4753}
4426 4754
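intel_get_cxsr_latency() (not shown in this hunk) resolves the self-refresh latency from a table keyed on chipset variant and FSB/memory frequencies; the DRM_INFO above fires when no row matches and CxSR stays disabled. A sketch of that style of lookup, with a hypothetical structure and row:

/* Sketch (structure and values are illustrative assumptions, not the
 * driver's actual table) of the lookup intel_get_cxsr_latency performs.
 */
struct cxsr_latency_entry {
	int is_pineview_g;	/* matches the IS_PINEVIEW_G() argument */
	unsigned long fsb_freq;
	unsigned long mem_freq;
	unsigned long latency;
};

static const struct cxsr_latency_entry latency_table[] = {
	{ 1, 800, 667, 3000 },	/* hypothetical row */
};

static const struct cxsr_latency_entry *
lookup_cxsr_latency(int is_pineview_g, unsigned long fsb, unsigned long mem)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(latency_table); i++)
		if (latency_table[i].is_pineview_g == is_pineview_g &&
		    latency_table[i].fsb_freq == fsb &&
		    latency_table[i].mem_freq == mem)
			return &latency_table[i];

	return NULL;	/* caller logs and disables CxSR, as above */
}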
4427void intel_modeset_cleanup(struct drm_device *dev) 4755void intel_modeset_cleanup(struct drm_device *dev)
@@ -4445,11 +4773,21 @@ void intel_modeset_cleanup(struct drm_device *dev)
4445 intel_increase_renderclock(dev, false); 4773 intel_increase_renderclock(dev, false);
4446 del_timer_sync(&dev_priv->idle_timer); 4774 del_timer_sync(&dev_priv->idle_timer);
4447 4775
4448 mutex_unlock(&dev->struct_mutex);
4449
4450 if (dev_priv->display.disable_fbc) 4776 if (dev_priv->display.disable_fbc)
4451 dev_priv->display.disable_fbc(dev); 4777 dev_priv->display.disable_fbc(dev);
4452 4778
4779 if (dev_priv->pwrctx) {
4780 struct drm_i915_gem_object *obj_priv;
4781
4782 obj_priv = dev_priv->pwrctx->driver_private;
4783 I915_WRITE(PWRCTXA, obj_priv->gtt_offset &~ PWRCTX_EN);
4784 I915_READ(PWRCTXA);
4785 i915_gem_object_unpin(dev_priv->pwrctx);
4786 drm_gem_object_unreference(dev_priv->pwrctx);
4787 }
4788
4789 mutex_unlock(&dev->struct_mutex);
4790
4453 drm_mode_config_cleanup(dev); 4791 drm_mode_config_cleanup(dev);
4454} 4792}
4455 4793
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index d83447557f9b..4e7aa8b7b938 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -33,7 +33,8 @@
33#include "intel_drv.h" 33#include "intel_drv.h"
34#include "i915_drm.h" 34#include "i915_drm.h"
35#include "i915_drv.h" 35#include "i915_drv.h"
36#include "intel_dp.h" 36#include "drm_dp_helper.h"
37
37 38
38#define DP_LINK_STATUS_SIZE 6 39#define DP_LINK_STATUS_SIZE 6
39#define DP_LINK_CHECK_TIMEOUT (10 * 1000) 40#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
@@ -223,8 +224,8 @@ intel_dp_aux_ch(struct intel_output *intel_output,
223 */ 224 */
224 if (IS_eDP(intel_output)) 225 if (IS_eDP(intel_output))
225 aux_clock_divider = 225; /* eDP input clock at 450Mhz */ 226 aux_clock_divider = 225; /* eDP input clock at 450Mhz */
226 else if (IS_IGDNG(dev)) 227 else if (IS_IRONLAKE(dev))
 227 aux_clock_divider = 62; /* IGDNG: input clock fixed at 125Mhz */ 228 aux_clock_divider = 62; /* Ironlake: input clock fixed at 125MHz */
228 else 229 else
229 aux_clock_divider = intel_hrawclk(dev) / 2; 230 aux_clock_divider = intel_hrawclk(dev) / 2;
230 231
@@ -282,7 +283,7 @@ intel_dp_aux_ch(struct intel_output *intel_output,
282 /* Timeouts occur when the device isn't connected, so they're 283 /* Timeouts occur when the device isn't connected, so they're
283 * "normal" -- don't fill the kernel log with these */ 284 * "normal" -- don't fill the kernel log with these */
284 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) { 285 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
285 DRM_DEBUG("dp_aux_ch timeout status 0x%08x\n", status); 286 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
286 return -ETIMEDOUT; 287 return -ETIMEDOUT;
287 } 288 }
288 289
@@ -382,17 +383,77 @@ intel_dp_aux_native_read(struct intel_output *intel_output,
382} 383}
383 384
384static int 385static int
385intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, 386intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
386 uint8_t *send, int send_bytes, 387 uint8_t write_byte, uint8_t *read_byte)
387 uint8_t *recv, int recv_bytes)
388{ 388{
389 struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
389 struct intel_dp_priv *dp_priv = container_of(adapter, 390 struct intel_dp_priv *dp_priv = container_of(adapter,
390 struct intel_dp_priv, 391 struct intel_dp_priv,
391 adapter); 392 adapter);
392 struct intel_output *intel_output = dp_priv->intel_output; 393 struct intel_output *intel_output = dp_priv->intel_output;
394 uint16_t address = algo_data->address;
395 uint8_t msg[5];
396 uint8_t reply[2];
397 int msg_bytes;
398 int reply_bytes;
399 int ret;
400
401 /* Set up the command byte */
402 if (mode & MODE_I2C_READ)
403 msg[0] = AUX_I2C_READ << 4;
404 else
405 msg[0] = AUX_I2C_WRITE << 4;
406
407 if (!(mode & MODE_I2C_STOP))
408 msg[0] |= AUX_I2C_MOT << 4;
393 409
394 return intel_dp_aux_ch(intel_output, 410 msg[1] = address >> 8;
395 send, send_bytes, recv, recv_bytes); 411 msg[2] = address;
412
413 switch (mode) {
414 case MODE_I2C_WRITE:
415 msg[3] = 0;
416 msg[4] = write_byte;
417 msg_bytes = 5;
418 reply_bytes = 1;
419 break;
420 case MODE_I2C_READ:
421 msg[3] = 0;
422 msg_bytes = 4;
423 reply_bytes = 2;
424 break;
425 default:
426 msg_bytes = 3;
427 reply_bytes = 1;
428 break;
429 }
430
431 for (;;) {
432 ret = intel_dp_aux_ch(intel_output,
433 msg, msg_bytes,
434 reply, reply_bytes);
435 if (ret < 0) {
436 DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
437 return ret;
438 }
439 switch (reply[0] & AUX_I2C_REPLY_MASK) {
440 case AUX_I2C_REPLY_ACK:
441 if (mode == MODE_I2C_READ) {
442 *read_byte = reply[1];
443 }
444 return reply_bytes - 1;
445 case AUX_I2C_REPLY_NACK:
446 DRM_DEBUG_KMS("aux_ch nack\n");
447 return -EREMOTEIO;
448 case AUX_I2C_REPLY_DEFER:
449 DRM_DEBUG_KMS("aux_ch defer\n");
450 udelay(100);
451 break;
452 default:
453 DRM_ERROR("aux_ch invalid reply 0x%02x\n", reply[0]);
454 return -EREMOTEIO;
455 }
456 }
396} 457}
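A worked example of the encoding above: a single-byte I2C read from address 0x50 (the usual EDID address), with MODE_I2C_STOP clear so the middle-of-transaction bit stays set. The byte values follow directly from the switch; the helper name is illustrative:

/* Worked example, derived from the code above (not in the patch). */
static void example_edid_read_header(uint8_t msg[4])
{
	msg[0] = (AUX_I2C_READ << 4) | (AUX_I2C_MOT << 4);	/* command */
	msg[1] = 0x50 >> 8;	/* address bits 15:8 -> 0x00 */
	msg[2] = 0x50;		/* address bits  7:0 -> 0x50 */
	msg[3] = 0;		/* AUX header length field: bytes - 1 */
}
/* msg_bytes = 4, reply_bytes = 2: reply[0] carries the AUX/I2C reply
 * code, reply[1] the data byte when the sink ACKs.
 */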
397 458
398static int 459static int
@@ -435,7 +496,8 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
435 dp_priv->link_bw = bws[clock]; 496 dp_priv->link_bw = bws[clock];
436 dp_priv->lane_count = lane_count; 497 dp_priv->lane_count = lane_count;
437 adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw); 498 adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw);
438 DRM_DEBUG("Display port link bw %02x lane count %d clock %d\n", 499 DRM_DEBUG_KMS("Display port link bw %02x lane "
500 "count %d clock %d\n",
439 dp_priv->link_bw, dp_priv->lane_count, 501 dp_priv->link_bw, dp_priv->lane_count,
440 adjusted_mode->clock); 502 adjusted_mode->clock);
441 return true; 503 return true;
@@ -514,7 +576,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
514 intel_dp_compute_m_n(3, lane_count, 576 intel_dp_compute_m_n(3, lane_count,
515 mode->clock, adjusted_mode->clock, &m_n); 577 mode->clock, adjusted_mode->clock, &m_n);
516 578
517 if (IS_IGDNG(dev)) { 579 if (IS_IRONLAKE(dev)) {
518 if (intel_crtc->pipe == 0) { 580 if (intel_crtc->pipe == 0) {
519 I915_WRITE(TRANSA_DATA_M1, 581 I915_WRITE(TRANSA_DATA_M1,
520 ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | 582 ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
@@ -606,23 +668,23 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
606 } 668 }
607} 669}
608 670
609static void igdng_edp_backlight_on (struct drm_device *dev) 671static void ironlake_edp_backlight_on (struct drm_device *dev)
610{ 672{
611 struct drm_i915_private *dev_priv = dev->dev_private; 673 struct drm_i915_private *dev_priv = dev->dev_private;
612 u32 pp; 674 u32 pp;
613 675
614 DRM_DEBUG("\n"); 676 DRM_DEBUG_KMS("\n");
615 pp = I915_READ(PCH_PP_CONTROL); 677 pp = I915_READ(PCH_PP_CONTROL);
616 pp |= EDP_BLC_ENABLE; 678 pp |= EDP_BLC_ENABLE;
617 I915_WRITE(PCH_PP_CONTROL, pp); 679 I915_WRITE(PCH_PP_CONTROL, pp);
618} 680}
619 681
620static void igdng_edp_backlight_off (struct drm_device *dev) 682static void ironlake_edp_backlight_off (struct drm_device *dev)
621{ 683{
622 struct drm_i915_private *dev_priv = dev->dev_private; 684 struct drm_i915_private *dev_priv = dev->dev_private;
623 u32 pp; 685 u32 pp;
624 686
625 DRM_DEBUG("\n"); 687 DRM_DEBUG_KMS("\n");
626 pp = I915_READ(PCH_PP_CONTROL); 688 pp = I915_READ(PCH_PP_CONTROL);
627 pp &= ~EDP_BLC_ENABLE; 689 pp &= ~EDP_BLC_ENABLE;
628 I915_WRITE(PCH_PP_CONTROL, pp); 690 I915_WRITE(PCH_PP_CONTROL, pp);
@@ -641,13 +703,13 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
641 if (dp_reg & DP_PORT_EN) { 703 if (dp_reg & DP_PORT_EN) {
642 intel_dp_link_down(intel_output, dp_priv->DP); 704 intel_dp_link_down(intel_output, dp_priv->DP);
643 if (IS_eDP(intel_output)) 705 if (IS_eDP(intel_output))
644 igdng_edp_backlight_off(dev); 706 ironlake_edp_backlight_off(dev);
645 } 707 }
646 } else { 708 } else {
647 if (!(dp_reg & DP_PORT_EN)) { 709 if (!(dp_reg & DP_PORT_EN)) {
648 intel_dp_link_train(intel_output, dp_priv->DP, dp_priv->link_configuration); 710 intel_dp_link_train(intel_output, dp_priv->DP, dp_priv->link_configuration);
649 if (IS_eDP(intel_output)) 711 if (IS_eDP(intel_output))
650 igdng_edp_backlight_on(dev); 712 ironlake_edp_backlight_on(dev);
651 } 713 }
652 } 714 }
653 dp_priv->dpms_mode = mode; 715 dp_priv->dpms_mode = mode;
@@ -1010,7 +1072,7 @@ intel_dp_link_down(struct intel_output *intel_output, uint32_t DP)
1010 struct drm_i915_private *dev_priv = dev->dev_private; 1072 struct drm_i915_private *dev_priv = dev->dev_private;
1011 struct intel_dp_priv *dp_priv = intel_output->dev_priv; 1073 struct intel_dp_priv *dp_priv = intel_output->dev_priv;
1012 1074
1013 DRM_DEBUG("\n"); 1075 DRM_DEBUG_KMS("\n");
1014 1076
1015 if (IS_eDP(intel_output)) { 1077 if (IS_eDP(intel_output)) {
1016 DP &= ~DP_PLL_ENABLE; 1078 DP &= ~DP_PLL_ENABLE;
@@ -1071,7 +1133,7 @@ intel_dp_check_link_status(struct intel_output *intel_output)
1071} 1133}
1072 1134
1073static enum drm_connector_status 1135static enum drm_connector_status
1074igdng_dp_detect(struct drm_connector *connector) 1136ironlake_dp_detect(struct drm_connector *connector)
1075{ 1137{
1076 struct intel_output *intel_output = to_intel_output(connector); 1138 struct intel_output *intel_output = to_intel_output(connector);
1077 struct intel_dp_priv *dp_priv = intel_output->dev_priv; 1139 struct intel_dp_priv *dp_priv = intel_output->dev_priv;
@@ -1106,8 +1168,8 @@ intel_dp_detect(struct drm_connector *connector)
1106 1168
1107 dp_priv->has_audio = false; 1169 dp_priv->has_audio = false;
1108 1170
1109 if (IS_IGDNG(dev)) 1171 if (IS_IRONLAKE(dev))
1110 return igdng_dp_detect(connector); 1172 return ironlake_dp_detect(connector);
1111 1173
1112 temp = I915_READ(PORT_HOTPLUG_EN); 1174 temp = I915_READ(PORT_HOTPLUG_EN);
1113 1175
@@ -1227,7 +1289,53 @@ intel_dp_hot_plug(struct intel_output *intel_output)
1227 if (dp_priv->dpms_mode == DRM_MODE_DPMS_ON) 1289 if (dp_priv->dpms_mode == DRM_MODE_DPMS_ON)
1228 intel_dp_check_link_status(intel_output); 1290 intel_dp_check_link_status(intel_output);
1229} 1291}
1230 1292/*
1293 * Enumerate the child dev array parsed from VBT to check whether
1294 * the given DP is present.
1295 * If it is present, return 1.
1296 * If it is not present, return 0.
1297 * If no child dev is parsed from VBT, it is assumed that the given
1298 * DP is present.
1299 */
1300static int dp_is_present_in_vbt(struct drm_device *dev, int dp_reg)
1301{
1302 struct drm_i915_private *dev_priv = dev->dev_private;
1303 struct child_device_config *p_child;
1304 int i, dp_port, ret;
1305
1306 if (!dev_priv->child_dev_num)
1307 return 1;
1308
1309 dp_port = 0;
1310 if (dp_reg == DP_B || dp_reg == PCH_DP_B)
1311 dp_port = PORT_IDPB;
1312 else if (dp_reg == DP_C || dp_reg == PCH_DP_C)
1313 dp_port = PORT_IDPC;
1314 else if (dp_reg == DP_D || dp_reg == PCH_DP_D)
1315 dp_port = PORT_IDPD;
1316
1317 ret = 0;
1318 for (i = 0; i < dev_priv->child_dev_num; i++) {
1319 p_child = dev_priv->child_dev + i;
1320 /*
1321 * If the device type is not DP, continue.
1322 */
1323 if (p_child->device_type != DEVICE_TYPE_DP &&
1324 p_child->device_type != DEVICE_TYPE_eDP)
1325 continue;
1326 /* Find the eDP port */
1327 if (dp_reg == DP_A && p_child->device_type == DEVICE_TYPE_eDP) {
1328 ret = 1;
1329 break;
1330 }
1331 /* Find the DP port */
1332 if (p_child->dvo_port == dp_port) {
1333 ret = 1;
1334 break;
1335 }
1336 }
1337 return ret;
1338}
1231void 1339void
1232intel_dp_init(struct drm_device *dev, int output_reg) 1340intel_dp_init(struct drm_device *dev, int output_reg)
1233{ 1341{
@@ -1237,6 +1345,10 @@ intel_dp_init(struct drm_device *dev, int output_reg)
1237 struct intel_dp_priv *dp_priv; 1345 struct intel_dp_priv *dp_priv;
1238 const char *name = NULL; 1346 const char *name = NULL;
1239 1347
1348 if (!dp_is_present_in_vbt(dev, output_reg)) {
1349 DRM_DEBUG_KMS("DP is not present. Ignore it\n");
1350 return;
1351 }
1240 intel_output = kcalloc(sizeof(struct intel_output) + 1352 intel_output = kcalloc(sizeof(struct intel_output) +
1241 sizeof(struct intel_dp_priv), 1, GFP_KERNEL); 1353 sizeof(struct intel_dp_priv), 1, GFP_KERNEL);
1242 if (!intel_output) 1354 if (!intel_output)
@@ -1254,11 +1366,11 @@ intel_dp_init(struct drm_device *dev, int output_reg)
1254 else 1366 else
1255 intel_output->type = INTEL_OUTPUT_DISPLAYPORT; 1367 intel_output->type = INTEL_OUTPUT_DISPLAYPORT;
1256 1368
1257 if (output_reg == DP_B) 1369 if (output_reg == DP_B || output_reg == PCH_DP_B)
1258 intel_output->clone_mask = (1 << INTEL_DP_B_CLONE_BIT); 1370 intel_output->clone_mask = (1 << INTEL_DP_B_CLONE_BIT);
1259 else if (output_reg == DP_C) 1371 else if (output_reg == DP_C || output_reg == PCH_DP_C)
1260 intel_output->clone_mask = (1 << INTEL_DP_C_CLONE_BIT); 1372 intel_output->clone_mask = (1 << INTEL_DP_C_CLONE_BIT);
1261 else if (output_reg == DP_D) 1373 else if (output_reg == DP_D || output_reg == PCH_DP_D)
1262 intel_output->clone_mask = (1 << INTEL_DP_D_CLONE_BIT); 1374 intel_output->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);
1263 1375
1264 if (IS_eDP(intel_output)) { 1376 if (IS_eDP(intel_output)) {
diff --git a/drivers/gpu/drm/i915/intel_dp.h b/drivers/gpu/drm/i915/intel_dp.h
deleted file mode 100644
index 2b38054d3b6d..000000000000
--- a/drivers/gpu/drm/i915/intel_dp.h
+++ /dev/null
@@ -1,144 +0,0 @@
1/*
2 * Copyright © 2008 Keith Packard
3 *
4 * Permission to use, copy, modify, distribute, and sell this software and its
5 * documentation for any purpose is hereby granted without fee, provided that
6 * the above copyright notice appear in all copies and that both that copyright
7 * notice and this permission notice appear in supporting documentation, and
8 * that the name of the copyright holders not be used in advertising or
9 * publicity pertaining to distribution of the software without specific,
10 * written prior permission. The copyright holders make no representations
11 * about the suitability of this software for any purpose. It is provided "as
12 * is" without express or implied warranty.
13 *
14 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
15 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
16 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
17 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
18 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
19 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
20 * OF THIS SOFTWARE.
21 */
22
23#ifndef _INTEL_DP_H_
24#define _INTEL_DP_H_
25
26/* From the VESA DisplayPort spec */
27
28#define AUX_NATIVE_WRITE 0x8
29#define AUX_NATIVE_READ 0x9
30#define AUX_I2C_WRITE 0x0
31#define AUX_I2C_READ 0x1
32#define AUX_I2C_STATUS 0x2
33#define AUX_I2C_MOT 0x4
34
35#define AUX_NATIVE_REPLY_ACK (0x0 << 4)
36#define AUX_NATIVE_REPLY_NACK (0x1 << 4)
37#define AUX_NATIVE_REPLY_DEFER (0x2 << 4)
38#define AUX_NATIVE_REPLY_MASK (0x3 << 4)
39
40#define AUX_I2C_REPLY_ACK (0x0 << 6)
41#define AUX_I2C_REPLY_NACK (0x1 << 6)
42#define AUX_I2C_REPLY_DEFER (0x2 << 6)
43#define AUX_I2C_REPLY_MASK (0x3 << 6)
44
45/* AUX CH addresses */
46#define DP_LINK_BW_SET 0x100
47# define DP_LINK_BW_1_62 0x06
48# define DP_LINK_BW_2_7 0x0a
49
50#define DP_LANE_COUNT_SET 0x101
51# define DP_LANE_COUNT_MASK 0x0f
52# define DP_LANE_COUNT_ENHANCED_FRAME_EN (1 << 7)
53
54#define DP_TRAINING_PATTERN_SET 0x102
55
56# define DP_TRAINING_PATTERN_DISABLE 0
57# define DP_TRAINING_PATTERN_1 1
58# define DP_TRAINING_PATTERN_2 2
59# define DP_TRAINING_PATTERN_MASK 0x3
60
61# define DP_LINK_QUAL_PATTERN_DISABLE (0 << 2)
62# define DP_LINK_QUAL_PATTERN_D10_2 (1 << 2)
63# define DP_LINK_QUAL_PATTERN_ERROR_RATE (2 << 2)
64# define DP_LINK_QUAL_PATTERN_PRBS7 (3 << 2)
65# define DP_LINK_QUAL_PATTERN_MASK (3 << 2)
66
67# define DP_RECOVERED_CLOCK_OUT_EN (1 << 4)
68# define DP_LINK_SCRAMBLING_DISABLE (1 << 5)
69
70# define DP_SYMBOL_ERROR_COUNT_BOTH (0 << 6)
71# define DP_SYMBOL_ERROR_COUNT_DISPARITY (1 << 6)
72# define DP_SYMBOL_ERROR_COUNT_SYMBOL (2 << 6)
73# define DP_SYMBOL_ERROR_COUNT_MASK (3 << 6)
74
75#define DP_TRAINING_LANE0_SET 0x103
76#define DP_TRAINING_LANE1_SET 0x104
77#define DP_TRAINING_LANE2_SET 0x105
78#define DP_TRAINING_LANE3_SET 0x106
79
80# define DP_TRAIN_VOLTAGE_SWING_MASK 0x3
81# define DP_TRAIN_VOLTAGE_SWING_SHIFT 0
82# define DP_TRAIN_MAX_SWING_REACHED (1 << 2)
83# define DP_TRAIN_VOLTAGE_SWING_400 (0 << 0)
84# define DP_TRAIN_VOLTAGE_SWING_600 (1 << 0)
85# define DP_TRAIN_VOLTAGE_SWING_800 (2 << 0)
86# define DP_TRAIN_VOLTAGE_SWING_1200 (3 << 0)
87
88# define DP_TRAIN_PRE_EMPHASIS_MASK (3 << 3)
89# define DP_TRAIN_PRE_EMPHASIS_0 (0 << 3)
90# define DP_TRAIN_PRE_EMPHASIS_3_5 (1 << 3)
91# define DP_TRAIN_PRE_EMPHASIS_6 (2 << 3)
92# define DP_TRAIN_PRE_EMPHASIS_9_5 (3 << 3)
93
94# define DP_TRAIN_PRE_EMPHASIS_SHIFT 3
95# define DP_TRAIN_MAX_PRE_EMPHASIS_REACHED (1 << 5)
96
97#define DP_DOWNSPREAD_CTRL 0x107
98# define DP_SPREAD_AMP_0_5 (1 << 4)
99
100#define DP_MAIN_LINK_CHANNEL_CODING_SET 0x108
101# define DP_SET_ANSI_8B10B (1 << 0)
102
103#define DP_LANE0_1_STATUS 0x202
104#define DP_LANE2_3_STATUS 0x203
105
106# define DP_LANE_CR_DONE (1 << 0)
107# define DP_LANE_CHANNEL_EQ_DONE (1 << 1)
108# define DP_LANE_SYMBOL_LOCKED (1 << 2)
109
110#define DP_LANE_ALIGN_STATUS_UPDATED 0x204
111
112#define DP_INTERLANE_ALIGN_DONE (1 << 0)
113#define DP_DOWNSTREAM_PORT_STATUS_CHANGED (1 << 6)
114#define DP_LINK_STATUS_UPDATED (1 << 7)
115
116#define DP_SINK_STATUS 0x205
117
118#define DP_RECEIVE_PORT_0_STATUS (1 << 0)
119#define DP_RECEIVE_PORT_1_STATUS (1 << 1)
120
121#define DP_ADJUST_REQUEST_LANE0_1 0x206
122#define DP_ADJUST_REQUEST_LANE2_3 0x207
123
124#define DP_ADJUST_VOLTAGE_SWING_LANE0_MASK 0x03
125#define DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT 0
126#define DP_ADJUST_PRE_EMPHASIS_LANE0_MASK 0x0c
127#define DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT 2
128#define DP_ADJUST_VOLTAGE_SWING_LANE1_MASK 0x30
129#define DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT 4
130#define DP_ADJUST_PRE_EMPHASIS_LANE1_MASK 0xc0
131#define DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT 6
132
133struct i2c_algo_dp_aux_data {
134 bool running;
135 u16 address;
136 int (*aux_ch) (struct i2c_adapter *adapter,
137 uint8_t *send, int send_bytes,
138 uint8_t *recv, int recv_bytes);
139};
140
141int
142i2c_dp_aux_add_bus(struct i2c_adapter *adapter);
143
144#endif /* _INTEL_DP_H_ */
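These definitions now live in drm_dp_helper.h (see the include change in intel_dp.c above), and aux_ch has moved to the per-byte signature implemented by intel_dp_i2c_aux_ch. A sketch of how an adapter is wired into the shared algo, assuming dp_priv embeds the algo struct; the helper name is illustrative:

/* Sketch (assumes the drm_dp_helper.h form of i2c_algo_dp_aux_data):
 * hook the per-byte callback into a DP AUX i2c adapter.
 */
static int example_dp_i2c_init(struct intel_dp_priv *dp_priv)
{
	dp_priv->algo.running = false;
	dp_priv->algo.address = 0;
	dp_priv->algo.aux_ch = intel_dp_i2c_aux_ch;
	dp_priv->adapter.algo_data = &dp_priv->algo;

	return i2c_dp_aux_add_bus(&dp_priv->adapter);
}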
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index ef61fe9507e2..a51573da1ff6 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -110,6 +110,32 @@ struct intel_output {
110 int clone_mask; 110 int clone_mask;
111}; 111};
112 112
113struct intel_crtc;
114struct intel_overlay {
115 struct drm_device *dev;
116 struct intel_crtc *crtc;
117 struct drm_i915_gem_object *vid_bo;
118 struct drm_i915_gem_object *old_vid_bo;
119 int active;
120 int pfit_active;
121 u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */
122 u32 color_key;
123 u32 brightness, contrast, saturation;
124 u32 old_xscale, old_yscale;
125 /* register access */
126 u32 flip_addr;
127 struct drm_i915_gem_object *reg_bo;
128 void *virt_addr;
129 /* flip handling */
130 uint32_t last_flip_req;
131 int hw_wedged;
132#define HW_WEDGED 1
133#define NEEDS_WAIT_FOR_FLIP 2
134#define RELEASE_OLD_VID 3
135#define SWITCH_OFF_STAGE_1 4
136#define SWITCH_OFF_STAGE_2 5
137};
138
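The hw_wedged stages above track where an overlay operation was interrupted so that recovery can resume at the right point. A compressed, hypothetical dispatch shows the intent (the real handling is in intel_overlay.c, added later in this patch):

/* Hypothetical sketch of dispatching on the recovery stages above. */
static int example_overlay_recover(struct intel_overlay *overlay)
{
	switch (overlay->hw_wedged) {
	case NEEDS_WAIT_FOR_FLIP:	/* flip emitted, wait not done */
	case RELEASE_OLD_VID:		/* old buffer still referenced */
	case SWITCH_OFF_STAGE_1:	/* idle wait emitted, keep going */
	case SWITCH_OFF_STAGE_2:	/* off emitted, finish teardown */
		/* re-issue or complete the interrupted step */
		return 0;
	case HW_WEDGED:
	default:
		return -EIO;		/* needs a full overlay off */
	}
}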
113struct intel_crtc { 139struct intel_crtc {
114 struct drm_crtc base; 140 struct drm_crtc base;
115 enum pipe pipe; 141 enum pipe pipe;
@@ -121,6 +147,8 @@ struct intel_crtc {
121 bool busy; /* is scanout buffer being updated frequently? */ 147 bool busy; /* is scanout buffer being updated frequently? */
122 struct timer_list idle_timer; 148 struct timer_list idle_timer;
123 bool lowfreq_avail; 149 bool lowfreq_avail;
150 struct intel_overlay *overlay;
151 struct intel_unpin_work *unpin_work;
124}; 152};
125 153
126#define to_intel_crtc(x) container_of(x, struct intel_crtc, base) 154#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
@@ -134,6 +162,8 @@ void intel_i2c_destroy(struct i2c_adapter *adapter);
134int intel_ddc_get_modes(struct intel_output *intel_output); 162int intel_ddc_get_modes(struct intel_output *intel_output);
135extern bool intel_ddc_probe(struct intel_output *intel_output); 163extern bool intel_ddc_probe(struct intel_output *intel_output);
136void intel_i2c_quirk_set(struct drm_device *dev, bool enable); 164void intel_i2c_quirk_set(struct drm_device *dev, bool enable);
165void intel_i2c_reset_gmbus(struct drm_device *dev);
166
137extern void intel_crt_init(struct drm_device *dev); 167extern void intel_crt_init(struct drm_device *dev);
138extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg); 168extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg);
139extern bool intel_sdvo_init(struct drm_device *dev, int output_device); 169extern bool intel_sdvo_init(struct drm_device *dev, int output_device);
@@ -148,6 +178,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
148extern void intel_edp_link_config (struct intel_output *, int *, int *); 178extern void intel_edp_link_config (struct intel_output *, int *, int *);
149 179
150 180
181extern int intel_panel_fitter_pipe (struct drm_device *dev);
151extern void intel_crtc_load_lut(struct drm_crtc *crtc); 182extern void intel_crtc_load_lut(struct drm_crtc *crtc);
152extern void intel_encoder_prepare (struct drm_encoder *encoder); 183extern void intel_encoder_prepare (struct drm_encoder *encoder);
153extern void intel_encoder_commit (struct drm_encoder *encoder); 184extern void intel_encoder_commit (struct drm_encoder *encoder);
@@ -177,10 +208,23 @@ extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
177 u16 blue, int regno); 208 u16 blue, int regno);
178extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, 209extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
179 u16 *blue, int regno); 210 u16 *blue, int regno);
211extern void intel_init_clock_gating(struct drm_device *dev);
180 212
181extern int intel_framebuffer_create(struct drm_device *dev, 213extern int intel_framebuffer_create(struct drm_device *dev,
182 struct drm_mode_fb_cmd *mode_cmd, 214 struct drm_mode_fb_cmd *mode_cmd,
183 struct drm_framebuffer **fb, 215 struct drm_framebuffer **fb,
184 struct drm_gem_object *obj); 216 struct drm_gem_object *obj);
185 217
218extern void intel_prepare_page_flip(struct drm_device *dev, int plane);
219extern void intel_finish_page_flip(struct drm_device *dev, int pipe);
220
221extern void intel_setup_overlay(struct drm_device *dev);
222extern void intel_cleanup_overlay(struct drm_device *dev);
223extern int intel_overlay_switch_off(struct intel_overlay *overlay);
224extern int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
225 int interruptible);
226extern int intel_overlay_put_image(struct drm_device *dev, void *data,
227 struct drm_file *file_priv);
228extern int intel_overlay_attrs(struct drm_device *dev, void *data,
229 struct drm_file *file_priv);
186#endif /* __INTEL_DRV_H__ */ 230#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 40fcf6fdef38..371d753e362b 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -230,8 +230,9 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
230 par->intel_fb = intel_fb; 230 par->intel_fb = intel_fb;
231 231
 232 /* To allow resizing without swapping buffers */ 232 /* To allow resizing without swapping buffers */
233 DRM_DEBUG("allocated %dx%d fb: 0x%08x, bo %p\n", intel_fb->base.width, 233 DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
234 intel_fb->base.height, obj_priv->gtt_offset, fbo); 234 intel_fb->base.width, intel_fb->base.height,
235 obj_priv->gtt_offset, fbo);
235 236
236 mutex_unlock(&dev->struct_mutex); 237 mutex_unlock(&dev->struct_mutex);
237 return 0; 238 return 0;
@@ -249,7 +250,7 @@ int intelfb_probe(struct drm_device *dev)
249{ 250{
250 int ret; 251 int ret;
251 252
252 DRM_DEBUG("\n"); 253 DRM_DEBUG_KMS("\n");
253 ret = drm_fb_helper_single_fb_probe(dev, 32, intelfb_create); 254 ret = drm_fb_helper_single_fb_probe(dev, 32, intelfb_create);
254 return ret; 255 return ret;
255} 256}
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index c33451aec1bd..f04dbbe7d400 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -82,7 +82,7 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
82 /* HW workaround, need to toggle enable bit off and on for 12bpc, but 82 /* HW workaround, need to toggle enable bit off and on for 12bpc, but
83 * we do this anyway which shows more stable in testing. 83 * we do this anyway which shows more stable in testing.
84 */ 84 */
85 if (IS_IGDNG(dev)) { 85 if (IS_IRONLAKE(dev)) {
86 I915_WRITE(hdmi_priv->sdvox_reg, temp & ~SDVO_ENABLE); 86 I915_WRITE(hdmi_priv->sdvox_reg, temp & ~SDVO_ENABLE);
87 POSTING_READ(hdmi_priv->sdvox_reg); 87 POSTING_READ(hdmi_priv->sdvox_reg);
88 } 88 }
@@ -99,7 +99,7 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
99 /* HW workaround, need to write this twice for issue that may result 99 /* HW workaround, need to write this twice for issue that may result
100 * in first write getting masked. 100 * in first write getting masked.
101 */ 101 */
102 if (IS_IGDNG(dev)) { 102 if (IS_IRONLAKE(dev)) {
103 I915_WRITE(hdmi_priv->sdvox_reg, temp); 103 I915_WRITE(hdmi_priv->sdvox_reg, temp);
104 POSTING_READ(hdmi_priv->sdvox_reg); 104 POSTING_READ(hdmi_priv->sdvox_reg);
105 } 105 }
@@ -225,7 +225,52 @@ static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
225 .destroy = intel_hdmi_enc_destroy, 225 .destroy = intel_hdmi_enc_destroy,
226}; 226};
227 227
228 228/*
229 * Enumerate the child dev array parsed from VBT to check whether
230 * the given HDMI is present.
231 * If it is present, return 1.
 232 * If it is not present, return 0.
 233 * If no child dev is parsed from VBT, it is assumed that the given
234 * HDMI is present.
235 */
236static int hdmi_is_present_in_vbt(struct drm_device *dev, int hdmi_reg)
237{
238 struct drm_i915_private *dev_priv = dev->dev_private;
239 struct child_device_config *p_child;
240 int i, hdmi_port, ret;
241
242 if (!dev_priv->child_dev_num)
243 return 1;
244
245 if (hdmi_reg == SDVOB)
246 hdmi_port = DVO_B;
247 else if (hdmi_reg == SDVOC)
248 hdmi_port = DVO_C;
249 else if (hdmi_reg == HDMIB)
250 hdmi_port = DVO_B;
251 else if (hdmi_reg == HDMIC)
252 hdmi_port = DVO_C;
253 else if (hdmi_reg == HDMID)
254 hdmi_port = DVO_D;
255 else
256 return 0;
257
258 ret = 0;
259 for (i = 0; i < dev_priv->child_dev_num; i++) {
260 p_child = dev_priv->child_dev + i;
261 /*
262 * If the device type is not HDMI, continue.
263 */
264 if (p_child->device_type != DEVICE_TYPE_HDMI)
265 continue;
266 /* Find the HDMI port */
267 if (p_child->dvo_port == hdmi_port) {
268 ret = 1;
269 break;
270 }
271 }
272 return ret;
273}
229void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) 274void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
230{ 275{
231 struct drm_i915_private *dev_priv = dev->dev_private; 276 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -233,6 +278,10 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
233 struct intel_output *intel_output; 278 struct intel_output *intel_output;
234 struct intel_hdmi_priv *hdmi_priv; 279 struct intel_hdmi_priv *hdmi_priv;
235 280
281 if (!hdmi_is_present_in_vbt(dev, sdvox_reg)) {
 282 DRM_DEBUG_KMS("HDMI is not present. Ignore it\n");
283 return;
284 }
236 intel_output = kcalloc(sizeof(struct intel_output) + 285 intel_output = kcalloc(sizeof(struct intel_output) +
237 sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL); 286 sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL);
238 if (!intel_output) 287 if (!intel_output)
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index c7eab724c418..8673c735b8ab 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -39,7 +39,7 @@ void intel_i2c_quirk_set(struct drm_device *dev, bool enable)
39 struct drm_i915_private *dev_priv = dev->dev_private; 39 struct drm_i915_private *dev_priv = dev->dev_private;
40 40
41 /* When using bit bashing for I2C, this bit needs to be set to 1 */ 41 /* When using bit bashing for I2C, this bit needs to be set to 1 */
42 if (!IS_IGD(dev)) 42 if (!IS_PINEVIEW(dev))
43 return; 43 return;
44 if (enable) 44 if (enable)
45 I915_WRITE(DSPCLK_GATE_D, 45 I915_WRITE(DSPCLK_GATE_D,
@@ -118,6 +118,23 @@ static void set_data(void *data, int state_high)
118 udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */ 118 udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
119} 119}
120 120
121/* Clears the GMBUS setup. Our driver doesn't make use of the GMBUS I2C
122 * engine, but if the BIOS leaves it enabled, then that can break our use
123 * of the bit-banging I2C interfaces. This is notably the case with the
124 * Mac Mini in EFI mode.
125 */
126void
127intel_i2c_reset_gmbus(struct drm_device *dev)
128{
129 struct drm_i915_private *dev_priv = dev->dev_private;
130
131 if (IS_IRONLAKE(dev)) {
132 I915_WRITE(PCH_GMBUS0, 0);
133 } else {
134 I915_WRITE(GMBUS0, 0);
135 }
136}
137
121/** 138/**
122 * intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg 139 * intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg
123 * @dev: DRM device 140 * @dev: DRM device
@@ -168,6 +185,8 @@ struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg,
168 if(i2c_bit_add_bus(&chan->adapter)) 185 if(i2c_bit_add_bus(&chan->adapter))
169 goto out_free; 186 goto out_free;
170 187
188 intel_i2c_reset_gmbus(dev);
189
171 /* JJJ: raise SCL and SDA? */ 190 /* JJJ: raise SCL and SDA? */
172 intel_i2c_quirk_set(dev, true); 191 intel_i2c_quirk_set(dev, true);
173 set_data(chan, 1); 192 set_data(chan, 1);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index eb365021bb5a..3118ce274e67 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -56,7 +56,7 @@ static void intel_lvds_set_backlight(struct drm_device *dev, int level)
56 struct drm_i915_private *dev_priv = dev->dev_private; 56 struct drm_i915_private *dev_priv = dev->dev_private;
57 u32 blc_pwm_ctl, reg; 57 u32 blc_pwm_ctl, reg;
58 58
59 if (IS_IGDNG(dev)) 59 if (IS_IRONLAKE(dev))
60 reg = BLC_PWM_CPU_CTL; 60 reg = BLC_PWM_CPU_CTL;
61 else 61 else
62 reg = BLC_PWM_CTL; 62 reg = BLC_PWM_CTL;
@@ -74,7 +74,7 @@ static u32 intel_lvds_get_max_backlight(struct drm_device *dev)
74 struct drm_i915_private *dev_priv = dev->dev_private; 74 struct drm_i915_private *dev_priv = dev->dev_private;
75 u32 reg; 75 u32 reg;
76 76
77 if (IS_IGDNG(dev)) 77 if (IS_IRONLAKE(dev))
78 reg = BLC_PWM_PCH_CTL2; 78 reg = BLC_PWM_PCH_CTL2;
79 else 79 else
80 reg = BLC_PWM_CTL; 80 reg = BLC_PWM_CTL;
@@ -91,7 +91,7 @@ static void intel_lvds_set_power(struct drm_device *dev, bool on)
91 struct drm_i915_private *dev_priv = dev->dev_private; 91 struct drm_i915_private *dev_priv = dev->dev_private;
92 u32 pp_status, ctl_reg, status_reg; 92 u32 pp_status, ctl_reg, status_reg;
93 93
94 if (IS_IGDNG(dev)) { 94 if (IS_IRONLAKE(dev)) {
95 ctl_reg = PCH_PP_CONTROL; 95 ctl_reg = PCH_PP_CONTROL;
96 status_reg = PCH_PP_STATUS; 96 status_reg = PCH_PP_STATUS;
97 } else { 97 } else {
@@ -137,7 +137,7 @@ static void intel_lvds_save(struct drm_connector *connector)
137 u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg; 137 u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg;
138 u32 pwm_ctl_reg; 138 u32 pwm_ctl_reg;
139 139
140 if (IS_IGDNG(dev)) { 140 if (IS_IRONLAKE(dev)) {
141 pp_on_reg = PCH_PP_ON_DELAYS; 141 pp_on_reg = PCH_PP_ON_DELAYS;
142 pp_off_reg = PCH_PP_OFF_DELAYS; 142 pp_off_reg = PCH_PP_OFF_DELAYS;
143 pp_ctl_reg = PCH_PP_CONTROL; 143 pp_ctl_reg = PCH_PP_CONTROL;
@@ -174,7 +174,7 @@ static void intel_lvds_restore(struct drm_connector *connector)
174 u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg; 174 u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg;
175 u32 pwm_ctl_reg; 175 u32 pwm_ctl_reg;
176 176
177 if (IS_IGDNG(dev)) { 177 if (IS_IRONLAKE(dev)) {
178 pp_on_reg = PCH_PP_ON_DELAYS; 178 pp_on_reg = PCH_PP_ON_DELAYS;
179 pp_off_reg = PCH_PP_OFF_DELAYS; 179 pp_off_reg = PCH_PP_OFF_DELAYS;
180 pp_ctl_reg = PCH_PP_CONTROL; 180 pp_ctl_reg = PCH_PP_CONTROL;
@@ -297,7 +297,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
297 } 297 }
298 298
299 /* full screen scale for now */ 299 /* full screen scale for now */
300 if (IS_IGDNG(dev)) 300 if (IS_IRONLAKE(dev))
301 goto out; 301 goto out;
302 302
303 /* 965+ wants fuzzy fitting */ 303 /* 965+ wants fuzzy fitting */
@@ -327,7 +327,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
327 * to register description and PRM. 327 * to register description and PRM.
328 * Change the value here to see the borders for debugging 328 * Change the value here to see the borders for debugging
329 */ 329 */
330 if (!IS_IGDNG(dev)) { 330 if (!IS_IRONLAKE(dev)) {
331 I915_WRITE(BCLRPAT_A, 0); 331 I915_WRITE(BCLRPAT_A, 0);
332 I915_WRITE(BCLRPAT_B, 0); 332 I915_WRITE(BCLRPAT_B, 0);
333 } 333 }
@@ -548,7 +548,7 @@ static void intel_lvds_prepare(struct drm_encoder *encoder)
548 struct drm_i915_private *dev_priv = dev->dev_private; 548 struct drm_i915_private *dev_priv = dev->dev_private;
549 u32 reg; 549 u32 reg;
550 550
551 if (IS_IGDNG(dev)) 551 if (IS_IRONLAKE(dev))
552 reg = BLC_PWM_CPU_CTL; 552 reg = BLC_PWM_CPU_CTL;
553 else 553 else
554 reg = BLC_PWM_CTL; 554 reg = BLC_PWM_CTL;
@@ -587,7 +587,7 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
587 * settings. 587 * settings.
588 */ 588 */
589 589
590 if (IS_IGDNG(dev)) 590 if (IS_IRONLAKE(dev))
591 return; 591 return;
592 592
593 /* 593 /*
@@ -914,6 +914,101 @@ static int intel_lid_present(void)
914#endif 914#endif
915 915
916/** 916/**
917 * intel_find_lvds_downclock - find the reduced downclock for LVDS in EDID
918 * @dev: drm device
919 * @connector: LVDS connector
920 *
921 * Find the reduced downclock for LVDS in EDID.
922 */
923static void intel_find_lvds_downclock(struct drm_device *dev,
924 struct drm_connector *connector)
925{
926 struct drm_i915_private *dev_priv = dev->dev_private;
927 struct drm_display_mode *scan, *panel_fixed_mode;
928 int temp_downclock;
929
930 panel_fixed_mode = dev_priv->panel_fixed_mode;
931 temp_downclock = panel_fixed_mode->clock;
932
933 mutex_lock(&dev->mode_config.mutex);
934 list_for_each_entry(scan, &connector->probed_modes, head) {
935 /*
 936 * If a probed mode has the same resolution and timings as the
 937 * fixed panel mode but a lower dot clock, a reduced downclock
 938 * has been found for the LVDS. In that case we can program
 939 * different FPx0/1 dividers to dynamically select between the
 940 * low and high frequency.
941 */
942 if (scan->hdisplay == panel_fixed_mode->hdisplay &&
943 scan->hsync_start == panel_fixed_mode->hsync_start &&
944 scan->hsync_end == panel_fixed_mode->hsync_end &&
945 scan->htotal == panel_fixed_mode->htotal &&
946 scan->vdisplay == panel_fixed_mode->vdisplay &&
947 scan->vsync_start == panel_fixed_mode->vsync_start &&
948 scan->vsync_end == panel_fixed_mode->vsync_end &&
949 scan->vtotal == panel_fixed_mode->vtotal) {
950 if (scan->clock < temp_downclock) {
951 /*
 952 * A downclock has already been found, but keep
 953 * looking for an even lower one.
954 */
955 temp_downclock = scan->clock;
956 }
957 }
958 }
959 mutex_unlock(&dev->mode_config.mutex);
960 if (temp_downclock < panel_fixed_mode->clock) {
961 /* We found the downclock for LVDS. */
962 dev_priv->lvds_downclock_avail = 1;
963 dev_priv->lvds_downclock = temp_downclock;
964 DRM_DEBUG_KMS("LVDS downclock is found in EDID. "
965 "Normal clock %dKhz, downclock %dKhz\n",
966 panel_fixed_mode->clock, temp_downclock);
967 }
968 return;
969}
970
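The long field-by-field comparison above is simply "identical timings, lower dot clock". Factored as a hypothetical helper, the intent reads:

/* Hypothetical refactoring sketch of the comparison above. */
static bool same_panel_timings(const struct drm_display_mode *a,
			       const struct drm_display_mode *b)
{
	return a->hdisplay == b->hdisplay &&
	       a->hsync_start == b->hsync_start &&
	       a->hsync_end == b->hsync_end &&
	       a->htotal == b->htotal &&
	       a->vdisplay == b->vdisplay &&
	       a->vsync_start == b->vsync_start &&
	       a->vsync_end == b->vsync_end &&
	       a->vtotal == b->vtotal;
}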
971/*
972 * Enumerate the child dev array parsed from VBT to check whether
973 * the LVDS is present.
974 * If it is present, return 1.
 975 * If it is not present, return 0.
 976 * If no child dev is parsed from VBT, it is assumed that the LVDS is present.
 977 * Note: the addin_offset should also be checked for the LVDS panel.
 978 * Only when it is non-zero is the panel assumed to be present.
979 */
980static int lvds_is_present_in_vbt(struct drm_device *dev)
981{
982 struct drm_i915_private *dev_priv = dev->dev_private;
983 struct child_device_config *p_child;
984 int i, ret;
985
986 if (!dev_priv->child_dev_num)
987 return 1;
988
989 ret = 0;
990 for (i = 0; i < dev_priv->child_dev_num; i++) {
991 p_child = dev_priv->child_dev + i;
992 /*
993 * If the device type is not LFP, continue.
994 * If the device type is 0x22, it is also regarded as LFP.
995 */
996 if (p_child->device_type != DEVICE_TYPE_INT_LFP &&
997 p_child->device_type != DEVICE_TYPE_LFP)
998 continue;
999
1000 /* The addin_offset should be checked. Only when it is
1001 * non-zero, it is regarded as present.
1002 */
1003 if (p_child->addin_offset) {
1004 ret = 1;
1005 break;
1006 }
1007 }
1008 return ret;
1009}
1010
1011/**
917 * intel_lvds_init - setup LVDS connectors on this device 1012 * intel_lvds_init - setup LVDS connectors on this device
918 * @dev: drm device 1013 * @dev: drm device
919 * 1014 *
@@ -936,21 +1031,20 @@ void intel_lvds_init(struct drm_device *dev)
936 if (dmi_check_system(intel_no_lvds)) 1031 if (dmi_check_system(intel_no_lvds))
937 return; 1032 return;
938 1033
939 /* Assume that any device without an ACPI LID device also doesn't 1034 /*
940 * have an integrated LVDS. We would be better off parsing the BIOS 1035 * Assume LVDS is present if there's an ACPI lid device or if the
941 * to get a reliable indicator, but that code isn't written yet. 1036 * device is present in the VBT.
942 *
943 * In the case of all-in-one desktops using LVDS that we've seen,
944 * they're using SDVO LVDS.
945 */ 1037 */
946 if (!intel_lid_present()) 1038 if (!lvds_is_present_in_vbt(dev) && !intel_lid_present()) {
1039 DRM_DEBUG_KMS("LVDS is not present in VBT and no lid detected\n");
947 return; 1040 return;
1041 }
948 1042
949 if (IS_IGDNG(dev)) { 1043 if (IS_IRONLAKE(dev)) {
950 if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0) 1044 if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
951 return; 1045 return;
952 if (dev_priv->edp_support) { 1046 if (dev_priv->edp_support) {
953 DRM_DEBUG("disable LVDS for eDP support\n"); 1047 DRM_DEBUG_KMS("disable LVDS for eDP support\n");
954 return; 1048 return;
955 } 1049 }
956 gpio = PCH_GPIOC; 1050 gpio = PCH_GPIOC;
@@ -1023,6 +1117,7 @@ void intel_lvds_init(struct drm_device *dev)
1023 dev_priv->panel_fixed_mode = 1117 dev_priv->panel_fixed_mode =
1024 drm_mode_duplicate(dev, scan); 1118 drm_mode_duplicate(dev, scan);
1025 mutex_unlock(&dev->mode_config.mutex); 1119 mutex_unlock(&dev->mode_config.mutex);
1120 intel_find_lvds_downclock(dev, connector);
1026 goto out; 1121 goto out;
1027 } 1122 }
1028 mutex_unlock(&dev->mode_config.mutex); 1123 mutex_unlock(&dev->mode_config.mutex);
@@ -1047,8 +1142,8 @@ void intel_lvds_init(struct drm_device *dev)
1047 * correct mode. 1142 * correct mode.
1048 */ 1143 */
1049 1144
 1050 /* IGDNG: FIXME if still fail, not try pipe mode now */ 1145 /* Ironlake: FIXME: if this still fails, don't try the pipe mode for now */
1051 if (IS_IGDNG(dev)) 1146 if (IS_IRONLAKE(dev))
1052 goto failed; 1147 goto failed;
1053 1148
1054 lvds = I915_READ(LVDS); 1149 lvds = I915_READ(LVDS);
@@ -1069,7 +1164,7 @@ void intel_lvds_init(struct drm_device *dev)
1069 goto failed; 1164 goto failed;
1070 1165
1071out: 1166out:
1072 if (IS_IGDNG(dev)) { 1167 if (IS_IRONLAKE(dev)) {
1073 u32 pwm; 1168 u32 pwm;
1074 /* make sure PWM is enabled */ 1169 /* make sure PWM is enabled */
1075 pwm = I915_READ(BLC_PWM_CPU_CTL2); 1170 pwm = I915_READ(BLC_PWM_CPU_CTL2);
@@ -1082,7 +1177,7 @@ out:
1082 } 1177 }
1083 dev_priv->lid_notifier.notifier_call = intel_lid_notify; 1178 dev_priv->lid_notifier.notifier_call = intel_lid_notify;
1084 if (acpi_lid_notifier_register(&dev_priv->lid_notifier)) { 1179 if (acpi_lid_notifier_register(&dev_priv->lid_notifier)) {
1085 DRM_DEBUG("lid notifier registration failed\n"); 1180 DRM_DEBUG_KMS("lid notifier registration failed\n");
1086 dev_priv->lid_notifier.notifier_call = NULL; 1181 dev_priv->lid_notifier.notifier_call = NULL;
1087 } 1182 }
1088 drm_sysfs_connector_add(connector); 1183 drm_sysfs_connector_add(connector);
@@ -1093,5 +1188,6 @@ failed:
1093 if (intel_output->ddc_bus) 1188 if (intel_output->ddc_bus)
1094 intel_i2c_destroy(intel_output->ddc_bus); 1189 intel_i2c_destroy(intel_output->ddc_bus);
1095 drm_connector_cleanup(connector); 1190 drm_connector_cleanup(connector);
1191 drm_encoder_cleanup(encoder);
1096 kfree(intel_output); 1192 kfree(intel_output);
1097} 1193}
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
new file mode 100644
index 000000000000..2639591c72e9
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -0,0 +1,1416 @@
1/*
2 * Copyright © 2009
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 *
23 * Authors:
24 * Daniel Vetter <daniel@ffwll.ch>
25 *
26 * Derived from Xorg ddx, xf86-video-intel, src/i830_video.c
27 */
28#include "drmP.h"
29#include "drm.h"
30#include "i915_drm.h"
31#include "i915_drv.h"
32#include "i915_reg.h"
33#include "intel_drv.h"
34
35/* Limits for overlay size. According to intel doc, the real limits are:
36 * Y width: 4095, UV width (planar): 2047, Y height: 2047,
 37 * UV height (planar): 1023. But xorg assumes 2048 for height and width. Use
 38 * the minimum of both. */
39#define IMAGE_MAX_WIDTH 2048
40#define IMAGE_MAX_HEIGHT 2046 /* 2 * 1023 */
41/* on 830 and 845 these large limits result in the card hanging */
42#define IMAGE_MAX_WIDTH_LEGACY 1024
43#define IMAGE_MAX_HEIGHT_LEGACY 1088
44
45/* overlay register definitions */
46/* OCMD register */
47#define OCMD_TILED_SURFACE (0x1<<19)
48#define OCMD_MIRROR_MASK (0x3<<17)
49#define OCMD_MIRROR_MODE (0x3<<17)
50#define OCMD_MIRROR_HORIZONTAL (0x1<<17)
51#define OCMD_MIRROR_VERTICAL (0x2<<17)
52#define OCMD_MIRROR_BOTH (0x3<<17)
53#define OCMD_BYTEORDER_MASK (0x3<<14) /* zero for YUYV or FOURCC YUY2 */
54#define OCMD_UV_SWAP (0x1<<14) /* YVYU */
55#define OCMD_Y_SWAP (0x2<<14) /* UYVY or FOURCC UYVY */
56#define OCMD_Y_AND_UV_SWAP (0x3<<14) /* VYUY */
57#define OCMD_SOURCE_FORMAT_MASK (0xf<<10)
58#define OCMD_RGB_888 (0x1<<10) /* not in i965 Intel docs */
59#define OCMD_RGB_555 (0x2<<10) /* not in i965 Intel docs */
60#define OCMD_RGB_565 (0x3<<10) /* not in i965 Intel docs */
61#define OCMD_YUV_422_PACKED (0x8<<10)
62#define OCMD_YUV_411_PACKED (0x9<<10) /* not in i965 Intel docs */
63#define OCMD_YUV_420_PLANAR (0xc<<10)
64#define OCMD_YUV_422_PLANAR (0xd<<10)
65#define OCMD_YUV_410_PLANAR (0xe<<10) /* also 411 */
66#define OCMD_TVSYNCFLIP_PARITY (0x1<<9)
67#define OCMD_TVSYNCFLIP_ENABLE (0x1<<7)
 68#define OCMD_BUF_TYPE_MASK (0x1<<5)
69#define OCMD_BUF_TYPE_FRAME (0x0<<5)
70#define OCMD_BUF_TYPE_FIELD (0x1<<5)
71#define OCMD_TEST_MODE (0x1<<4)
72#define OCMD_BUFFER_SELECT (0x3<<2)
73#define OCMD_BUFFER0 (0x0<<2)
74#define OCMD_BUFFER1 (0x1<<2)
75#define OCMD_FIELD_SELECT (0x1<<2)
76#define OCMD_FIELD0 (0x0<<1)
77#define OCMD_FIELD1 (0x1<<1)
78#define OCMD_ENABLE (0x1<<0)
79
80/* OCONFIG register */
81#define OCONF_PIPE_MASK (0x1<<18)
82#define OCONF_PIPE_A (0x0<<18)
83#define OCONF_PIPE_B (0x1<<18)
84#define OCONF_GAMMA2_ENABLE (0x1<<16)
85#define OCONF_CSC_MODE_BT601 (0x0<<5)
86#define OCONF_CSC_MODE_BT709 (0x1<<5)
87#define OCONF_CSC_BYPASS (0x1<<4)
88#define OCONF_CC_OUT_8BIT (0x1<<3)
89#define OCONF_TEST_MODE (0x1<<2)
90#define OCONF_THREE_LINE_BUFFER (0x1<<0)
91#define OCONF_TWO_LINE_BUFFER (0x0<<0)
92
93/* DCLRKM (dst-key) register */
94#define DST_KEY_ENABLE (0x1<<31)
95#define CLK_RGB24_MASK 0x0
96#define CLK_RGB16_MASK 0x070307
97#define CLK_RGB15_MASK 0x070707
98#define CLK_RGB8I_MASK 0xffffff
99
100#define RGB16_TO_COLORKEY(c) \
101 (((c & 0xF800) << 8) | ((c & 0x07E0) << 5) | ((c & 0x001F) << 3))
102#define RGB15_TO_COLORKEY(c) \
103 (((c & 0x7c00) << 9) | ((c & 0x03E0) << 6) | ((c & 0x001F) << 3))
104
105/* overlay flip addr flag */
106#define OFC_UPDATE 0x1
107
108/* polyphase filter coefficients */
109#define N_HORIZ_Y_TAPS 5
110#define N_VERT_Y_TAPS 3
111#define N_HORIZ_UV_TAPS 3
112#define N_VERT_UV_TAPS 3
113#define N_PHASES 17
114#define MAX_TAPS 5
115
 116/* memory buffered overlay registers */
117struct overlay_registers {
118 u32 OBUF_0Y;
119 u32 OBUF_1Y;
120 u32 OBUF_0U;
121 u32 OBUF_0V;
122 u32 OBUF_1U;
123 u32 OBUF_1V;
124 u32 OSTRIDE;
125 u32 YRGB_VPH;
126 u32 UV_VPH;
127 u32 HORZ_PH;
128 u32 INIT_PHS;
129 u32 DWINPOS;
130 u32 DWINSZ;
131 u32 SWIDTH;
132 u32 SWIDTHSW;
133 u32 SHEIGHT;
134 u32 YRGBSCALE;
135 u32 UVSCALE;
136 u32 OCLRC0;
137 u32 OCLRC1;
138 u32 DCLRKV;
139 u32 DCLRKM;
140 u32 SCLRKVH;
141 u32 SCLRKVL;
142 u32 SCLRKEN;
143 u32 OCONFIG;
144 u32 OCMD;
145 u32 RESERVED1; /* 0x6C */
146 u32 OSTART_0Y;
147 u32 OSTART_1Y;
148 u32 OSTART_0U;
149 u32 OSTART_0V;
150 u32 OSTART_1U;
151 u32 OSTART_1V;
152 u32 OTILEOFF_0Y;
153 u32 OTILEOFF_1Y;
154 u32 OTILEOFF_0U;
155 u32 OTILEOFF_0V;
156 u32 OTILEOFF_1U;
157 u32 OTILEOFF_1V;
158 u32 FASTHSCALE; /* 0xA0 */
159 u32 UVSCALEV; /* 0xA4 */
160 u32 RESERVEDC[(0x200 - 0xA8) / 4]; /* 0xA8 - 0x1FC */
161 u16 Y_VCOEFS[N_VERT_Y_TAPS * N_PHASES]; /* 0x200 */
162 u16 RESERVEDD[0x100 / 2 - N_VERT_Y_TAPS * N_PHASES];
163 u16 Y_HCOEFS[N_HORIZ_Y_TAPS * N_PHASES]; /* 0x300 */
164 u16 RESERVEDE[0x200 / 2 - N_HORIZ_Y_TAPS * N_PHASES];
165 u16 UV_VCOEFS[N_VERT_UV_TAPS * N_PHASES]; /* 0x500 */
166 u16 RESERVEDF[0x100 / 2 - N_VERT_UV_TAPS * N_PHASES];
167 u16 UV_HCOEFS[N_HORIZ_UV_TAPS * N_PHASES]; /* 0x600 */
168 u16 RESERVEDG[0x100 / 2 - N_HORIZ_UV_TAPS * N_PHASES];
169};
170
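As an illustration of the OCMD fields defined above, enabling the overlay for a packed YUY2 frame scanned out of buffer 0 composes to:

/* Illustration only (derived from the definitions above, not taken
 * from the driver): OCMD for a packed YUY2 frame from buffer 0.
 */
u32 ocmd = OCMD_YUV_422_PACKED |	/* packed 4:2:2 source */
	   OCMD_BUF_TYPE_FRAME |	/* frame, not field, buffer */
	   OCMD_BUFFER0 |		/* scan out of buffer 0 */
	   OCMD_ENABLE;			/* overlay on */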
173
174#define OVERLAY_NONPHYSICAL(dev) (IS_G33(dev) || IS_I965G(dev))
175#define OVERLAY_EXISTS(dev) (!IS_G4X(dev) && !IS_IRONLAKE(dev))
176
177
178static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
179{
180 drm_i915_private_t *dev_priv = overlay->dev->dev_private;
181 struct overlay_registers *regs;
182
183 /* no recursive mappings */
184 BUG_ON(overlay->virt_addr);
185
186 if (OVERLAY_NONPHYSICAL(overlay->dev)) {
187 regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
188 overlay->reg_bo->gtt_offset);
189
190 if (!regs) {
191 DRM_ERROR("failed to map overlay regs in GTT\n");
192 return NULL;
193 }
194 } else
195 regs = overlay->reg_bo->phys_obj->handle->vaddr;
196
197 return overlay->virt_addr = regs;
198}
199
200static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay)
201{
202 struct drm_device *dev = overlay->dev;
203 drm_i915_private_t *dev_priv = dev->dev_private;
204
205 if (OVERLAY_NONPHYSICAL(overlay->dev))
206 io_mapping_unmap_atomic(overlay->virt_addr);
207
208 overlay->virt_addr = NULL;
209
 210 I915_READ(OVADD); /* flush wc caches */
211
212 return;
213}
214
 215/* overlay needs to be disabled in OCMD reg */
216static int intel_overlay_on(struct intel_overlay *overlay)
217{
218 struct drm_device *dev = overlay->dev;
219 drm_i915_private_t *dev_priv = dev->dev_private;
220 int ret;
221 RING_LOCALS;
222
223 BUG_ON(overlay->active);
224
225 overlay->active = 1;
226 overlay->hw_wedged = NEEDS_WAIT_FOR_FLIP;
227
228 BEGIN_LP_RING(6);
229 OUT_RING(MI_FLUSH);
230 OUT_RING(MI_NOOP);
231 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON);
232 OUT_RING(overlay->flip_addr | OFC_UPDATE);
233 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
234 OUT_RING(MI_NOOP);
235 ADVANCE_LP_RING();
236
237 overlay->last_flip_req = i915_add_request(dev, NULL, 0);
238 if (overlay->last_flip_req == 0)
239 return -ENOMEM;
240
241 ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
242 if (ret != 0)
243 return ret;
244
245 overlay->hw_wedged = 0;
246 overlay->last_flip_req = 0;
247 return 0;
248}
249
250/* overlay needs to be enabled in OCMD reg */
251static void intel_overlay_continue(struct intel_overlay *overlay,
252 bool load_polyphase_filter)
253{
254 struct drm_device *dev = overlay->dev;
255 drm_i915_private_t *dev_priv = dev->dev_private;
256 u32 flip_addr = overlay->flip_addr;
257 u32 tmp;
258 RING_LOCALS;
259
260 BUG_ON(!overlay->active);
261
262 if (load_polyphase_filter)
263 flip_addr |= OFC_UPDATE;
264
265 /* check for underruns */
266 tmp = I915_READ(DOVSTA);
267 if (tmp & (1 << 17))
268 DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
269
270 BEGIN_LP_RING(4);
271 OUT_RING(MI_FLUSH);
272 OUT_RING(MI_NOOP);
273 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
274 OUT_RING(flip_addr);
275 ADVANCE_LP_RING();
276
277 overlay->last_flip_req = i915_add_request(dev, NULL, 0);
278}
279
280static int intel_overlay_wait_flip(struct intel_overlay *overlay)
281{
282 struct drm_device *dev = overlay->dev;
283 drm_i915_private_t *dev_priv = dev->dev_private;
284 int ret;
285 u32 tmp;
286 RING_LOCALS;
287
288 if (overlay->last_flip_req != 0) {
289 ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
290 if (ret == 0) {
291 overlay->last_flip_req = 0;
292
293 tmp = I915_READ(ISR);
294
295 if (!(tmp & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT))
296 return 0;
297 }
298 }
299
300 /* synchronous slowpath */
301 overlay->hw_wedged = RELEASE_OLD_VID;
302
303 BEGIN_LP_RING(2);
304 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
305 OUT_RING(MI_NOOP);
306 ADVANCE_LP_RING();
307
308 overlay->last_flip_req = i915_add_request(dev, NULL, 0);
309 if (overlay->last_flip_req == 0)
310 return -ENOMEM;
311
312 ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
313 if (ret != 0)
314 return ret;
315
316 overlay->hw_wedged = 0;
317 overlay->last_flip_req = 0;
318 return 0;
319}
320
321/* overlay needs to be disabled in OCMD reg */
322static int intel_overlay_off(struct intel_overlay *overlay)
323{
324 u32 flip_addr = overlay->flip_addr;
325 struct drm_device *dev = overlay->dev;
326 drm_i915_private_t *dev_priv = dev->dev_private;
327 int ret;
328 RING_LOCALS;
329
330 BUG_ON(!overlay->active);
331
332 /* According to intel docs the overlay hw may hang (when switching
333 * off) without loading the filter coeffs. It is however unclear whether
334 * this applies to the disabling of the overlay or to the switching off
335 * of the hw. Do it in both cases */
336 flip_addr |= OFC_UPDATE;
337
338 /* wait for overlay to go idle */
339 overlay->hw_wedged = SWITCH_OFF_STAGE_1;
340
341 BEGIN_LP_RING(6);
342 OUT_RING(MI_FLUSH);
343 OUT_RING(MI_NOOP);
344 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
345 OUT_RING(flip_addr);
346 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
347 OUT_RING(MI_NOOP);
348 ADVANCE_LP_RING();
349
350 overlay->last_flip_req = i915_add_request(dev, NULL, 0);
351 if (overlay->last_flip_req == 0)
352 return -ENOMEM;
353
354 ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
355 if (ret != 0)
356 return ret;
357
358 /* turn overlay off */
359 overlay->hw_wedged = SWITCH_OFF_STAGE_2;
360
361 BEGIN_LP_RING(6);
362 OUT_RING(MI_FLUSH);
363 OUT_RING(MI_NOOP);
364 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
365 OUT_RING(flip_addr);
366 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
367 OUT_RING(MI_NOOP);
368 ADVANCE_LP_RING();
369
370 overlay->last_flip_req = i915_add_request(dev, NULL, 0);
371 if (overlay->last_flip_req == 0)
372 return -ENOMEM;
373
374 ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
375 if (ret != 0)
376 return ret;
377
378 overlay->hw_wedged = 0;
379 overlay->last_flip_req = 0;
 380	return 0;
381}
382
383static void intel_overlay_off_tail(struct intel_overlay *overlay)
384{
385 struct drm_gem_object *obj;
386
387 /* never have the overlay hw on without showing a frame */
388 BUG_ON(!overlay->vid_bo);
389 obj = overlay->vid_bo->obj;
390
391 i915_gem_object_unpin(obj);
392 drm_gem_object_unreference(obj);
393 overlay->vid_bo = NULL;
394
395 overlay->crtc->overlay = NULL;
396 overlay->crtc = NULL;
397 overlay->active = 0;
398}
399
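/*
 * Note on the recovery state machine (summarizing the switch below):
 * hw_wedged records how far an interrupted operation got, so recovery can
 * resume instead of redoing work -- NEEDS_WAIT_FOR_FLIP (just wait for the
 * pending flip), RELEASE_OLD_VID (drop the old frame's buffer),
 * SWITCH_OFF_STAGE_1/2 (finish the two-step disable) and HW_WEDGED
 * (unrecoverable, fail with -EIO).
 */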
400/* recover from an interruption due to a signal
 401 * We have to be careful not to repeat work forever and to make forward progress. */
402int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
403 int interruptible)
404{
405 struct drm_device *dev = overlay->dev;
406 drm_i915_private_t *dev_priv = dev->dev_private;
407 struct drm_gem_object *obj;
408 u32 flip_addr;
409 int ret;
410 RING_LOCALS;
411
412 if (overlay->hw_wedged == HW_WEDGED)
413 return -EIO;
414
415 if (overlay->last_flip_req == 0) {
416 overlay->last_flip_req = i915_add_request(dev, NULL, 0);
417 if (overlay->last_flip_req == 0)
418 return -ENOMEM;
419 }
420
421 ret = i915_do_wait_request(dev, overlay->last_flip_req, interruptible);
422 if (ret != 0)
423 return ret;
424
425 switch (overlay->hw_wedged) {
426 case RELEASE_OLD_VID:
427 obj = overlay->old_vid_bo->obj;
428 i915_gem_object_unpin(obj);
429 drm_gem_object_unreference(obj);
430 overlay->old_vid_bo = NULL;
431 break;
432 case SWITCH_OFF_STAGE_1:
433 flip_addr = overlay->flip_addr;
434 flip_addr |= OFC_UPDATE;
435
436 overlay->hw_wedged = SWITCH_OFF_STAGE_2;
437
438 BEGIN_LP_RING(6);
439 OUT_RING(MI_FLUSH);
440 OUT_RING(MI_NOOP);
441 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
442 OUT_RING(flip_addr);
443 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
444 OUT_RING(MI_NOOP);
445 ADVANCE_LP_RING();
446
447 overlay->last_flip_req = i915_add_request(dev, NULL, 0);
448 if (overlay->last_flip_req == 0)
449 return -ENOMEM;
450
451 ret = i915_do_wait_request(dev, overlay->last_flip_req,
452 interruptible);
453 if (ret != 0)
454 return ret;
455
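	/* deliberate fall through: finish the switch-off in stage 2 */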
456 case SWITCH_OFF_STAGE_2:
457 intel_overlay_off_tail(overlay);
458 break;
459 default:
460 BUG_ON(overlay->hw_wedged != NEEDS_WAIT_FOR_FLIP);
461 }
462
463 overlay->hw_wedged = 0;
464 overlay->last_flip_req = 0;
465 return 0;
466}
467
468/* Wait for pending overlay flip and release old frame.
 469 * Needs to be called before the overlay registers are changed
470 * via intel_overlay_(un)map_regs_atomic */
471static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
472{
473 int ret;
474 struct drm_gem_object *obj;
475
476 /* only wait if there is actually an old frame to release to
477 * guarantee forward progress */
478 if (!overlay->old_vid_bo)
479 return 0;
480
481 ret = intel_overlay_wait_flip(overlay);
482 if (ret != 0)
483 return ret;
484
485 obj = overlay->old_vid_bo->obj;
486 i915_gem_object_unpin(obj);
487 drm_gem_object_unreference(obj);
488 overlay->old_vid_bo = NULL;
489
490 return 0;
491}
492
493struct put_image_params {
494 int format;
495 short dst_x;
496 short dst_y;
497 short dst_w;
498 short dst_h;
499 short src_w;
500 short src_scan_h;
501 short src_scan_w;
502 short src_h;
503 short stride_Y;
504 short stride_UV;
505 int offset_Y;
506 int offset_U;
507 int offset_V;
508};
509
510static int packed_depth_bytes(u32 format)
511{
512 switch (format & I915_OVERLAY_DEPTH_MASK) {
513 case I915_OVERLAY_YUV422:
514 return 4;
515 case I915_OVERLAY_YUV411:
516 /* return 6; not implemented */
517 default:
518 return -EINVAL;
519 }
520}
521
522static int packed_width_bytes(u32 format, short width)
523{
524 switch (format & I915_OVERLAY_DEPTH_MASK) {
525 case I915_OVERLAY_YUV422:
526 return width << 1;
527 default:
528 return -EINVAL;
529 }
530}
531
532static int uv_hsubsampling(u32 format)
533{
534 switch (format & I915_OVERLAY_DEPTH_MASK) {
535 case I915_OVERLAY_YUV422:
536 case I915_OVERLAY_YUV420:
537 return 2;
538 case I915_OVERLAY_YUV411:
539 case I915_OVERLAY_YUV410:
540 return 4;
541 default:
542 return -EINVAL;
543 }
544}
545
546static int uv_vsubsampling(u32 format)
547{
548 switch (format & I915_OVERLAY_DEPTH_MASK) {
549 case I915_OVERLAY_YUV420:
550 case I915_OVERLAY_YUV410:
551 return 2;
552 case I915_OVERLAY_YUV422:
553 case I915_OVERLAY_YUV411:
554 return 1;
555 default:
556 return -EINVAL;
557 }
558}
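/*
 * Worked example (illustrative, not from the original source): YUV420
 * subsamples chroma by 2 in both directions (one U/V pair per 2x2 block
 * of Y), so uv_hsubsampling() and uv_vsubsampling() both return 2; YUV422
 * keeps full vertical chroma resolution, hence hsub = 2 and vsub = 1.
 */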
559
560static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width)
561{
562 u32 mask, shift, ret;
563 if (IS_I9XX(dev)) {
564 mask = 0x3f;
565 shift = 6;
566 } else {
567 mask = 0x1f;
568 shift = 5;
569 }
570 ret = ((offset + width + mask) >> shift) - (offset >> shift);
571 if (IS_I9XX(dev))
572 ret <<= 1;
 573	ret -= 1;
574 return ret << 2;
575}
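/*
 * Worked example with made-up values: on i9xx, offset = 0x20 and
 * width = 640 give ((0x20 + 640 + 63) >> 6) - (0x20 >> 6) = 11 chunks
 * spanned, then (11 << 1) - 1 = 21, and 21 << 2 = 84 as the resulting
 * SWIDTHSW value.
 */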
576
577static const u16 y_static_hcoeffs[N_HORIZ_Y_TAPS * N_PHASES] = {
578 0x3000, 0xb4a0, 0x1930, 0x1920, 0xb4a0,
579 0x3000, 0xb500, 0x19d0, 0x1880, 0xb440,
580 0x3000, 0xb540, 0x1a88, 0x2f80, 0xb3e0,
581 0x3000, 0xb580, 0x1b30, 0x2e20, 0xb380,
582 0x3000, 0xb5c0, 0x1bd8, 0x2cc0, 0xb320,
583 0x3020, 0xb5e0, 0x1c60, 0x2b80, 0xb2c0,
584 0x3020, 0xb5e0, 0x1cf8, 0x2a20, 0xb260,
585 0x3020, 0xb5e0, 0x1d80, 0x28e0, 0xb200,
586 0x3020, 0xb5c0, 0x1e08, 0x3f40, 0xb1c0,
587 0x3020, 0xb580, 0x1e78, 0x3ce0, 0xb160,
588 0x3040, 0xb520, 0x1ed8, 0x3aa0, 0xb120,
589 0x3040, 0xb4a0, 0x1f30, 0x3880, 0xb0e0,
590 0x3040, 0xb400, 0x1f78, 0x3680, 0xb0a0,
591 0x3020, 0xb340, 0x1fb8, 0x34a0, 0xb060,
592 0x3020, 0xb240, 0x1fe0, 0x32e0, 0xb040,
593 0x3020, 0xb140, 0x1ff8, 0x3160, 0xb020,
594 0xb000, 0x3000, 0x0800, 0x3000, 0xb000};
595static const u16 uv_static_hcoeffs[N_HORIZ_UV_TAPS * N_PHASES] = {
596 0x3000, 0x1800, 0x1800, 0xb000, 0x18d0, 0x2e60,
597 0xb000, 0x1990, 0x2ce0, 0xb020, 0x1a68, 0x2b40,
598 0xb040, 0x1b20, 0x29e0, 0xb060, 0x1bd8, 0x2880,
599 0xb080, 0x1c88, 0x3e60, 0xb0a0, 0x1d28, 0x3c00,
600 0xb0c0, 0x1db8, 0x39e0, 0xb0e0, 0x1e40, 0x37e0,
601 0xb100, 0x1eb8, 0x3620, 0xb100, 0x1f18, 0x34a0,
602 0xb100, 0x1f68, 0x3360, 0xb0e0, 0x1fa8, 0x3240,
603 0xb0c0, 0x1fe0, 0x3140, 0xb060, 0x1ff0, 0x30a0,
604 0x3000, 0x0800, 0x3000};
605
606static void update_polyphase_filter(struct overlay_registers *regs)
607{
608 memcpy(regs->Y_HCOEFS, y_static_hcoeffs, sizeof(y_static_hcoeffs));
609 memcpy(regs->UV_HCOEFS, uv_static_hcoeffs, sizeof(uv_static_hcoeffs));
610}
611
612static bool update_scaling_factors(struct intel_overlay *overlay,
613 struct overlay_registers *regs,
614 struct put_image_params *params)
615{
616 /* fixed point with a 12 bit shift */
617 u32 xscale, yscale, xscale_UV, yscale_UV;
618#define FP_SHIFT 12
619#define FRACT_MASK 0xfff
620 bool scale_changed = false;
621 int uv_hscale = uv_hsubsampling(params->format);
622 int uv_vscale = uv_vsubsampling(params->format);
623
624 if (params->dst_w > 1)
625 xscale = ((params->src_scan_w - 1) << FP_SHIFT)
626 /(params->dst_w);
627 else
628 xscale = 1 << FP_SHIFT;
629
630 if (params->dst_h > 1)
631 yscale = ((params->src_scan_h - 1) << FP_SHIFT)
632 /(params->dst_h);
633 else
634 yscale = 1 << FP_SHIFT;
635
636 /*if (params->format & I915_OVERLAY_YUV_PLANAR) {*/
637 xscale_UV = xscale/uv_hscale;
638 yscale_UV = yscale/uv_vscale;
 639	/* make the Y scale to UV scale ratio an exact multiple */
640 xscale = xscale_UV * uv_hscale;
641 yscale = yscale_UV * uv_vscale;
642 /*} else {
643 xscale_UV = 0;
644 yscale_UV = 0;
645 }*/
646
647 if (xscale != overlay->old_xscale || yscale != overlay->old_yscale)
648 scale_changed = true;
649 overlay->old_xscale = xscale;
650 overlay->old_yscale = yscale;
651
652 regs->YRGBSCALE = ((yscale & FRACT_MASK) << 20)
653 | ((xscale >> FP_SHIFT) << 16)
654 | ((xscale & FRACT_MASK) << 3);
655 regs->UVSCALE = ((yscale_UV & FRACT_MASK) << 20)
656 | ((xscale_UV >> FP_SHIFT) << 16)
657 | ((xscale_UV & FRACT_MASK) << 3);
658 regs->UVSCALEV = ((yscale >> FP_SHIFT) << 16)
659 | ((yscale_UV >> FP_SHIFT) << 0);
660
661 if (scale_changed)
662 update_polyphase_filter(regs);
663
664 return scale_changed;
665}
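/*
 * Fixed-point example with made-up values: src_scan_w = 720 into
 * dst_w = 360 gives xscale = ((720 - 1) << 12) / 360 = 8180, roughly 2.0
 * in 4.12 fixed point (8180 / 4096 ~= 1.997). The integer part lands in
 * bits 16-19 of YRGBSCALE and the fraction in bits 3-14.
 */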
666
667static void update_colorkey(struct intel_overlay *overlay,
668 struct overlay_registers *regs)
669{
670 u32 key = overlay->color_key;
671 switch (overlay->crtc->base.fb->bits_per_pixel) {
672 case 8:
673 regs->DCLRKV = 0;
 674		regs->DCLRKM = CLK_RGB8I_MASK | DST_KEY_ENABLE;
		break;
675 case 16:
676 if (overlay->crtc->base.fb->depth == 15) {
677 regs->DCLRKV = RGB15_TO_COLORKEY(key);
678 regs->DCLRKM = CLK_RGB15_MASK | DST_KEY_ENABLE;
679 } else {
680 regs->DCLRKV = RGB16_TO_COLORKEY(key);
681 regs->DCLRKM = CLK_RGB16_MASK | DST_KEY_ENABLE;
 682		}
		break;
683 case 24:
684 case 32:
685 regs->DCLRKV = key;
686 regs->DCLRKM = CLK_RGB24_MASK | DST_KEY_ENABLE;
687 }
688}
689
690static u32 overlay_cmd_reg(struct put_image_params *params)
691{
692 u32 cmd = OCMD_ENABLE | OCMD_BUF_TYPE_FRAME | OCMD_BUFFER0;
693
694 if (params->format & I915_OVERLAY_YUV_PLANAR) {
695 switch (params->format & I915_OVERLAY_DEPTH_MASK) {
696 case I915_OVERLAY_YUV422:
697 cmd |= OCMD_YUV_422_PLANAR;
698 break;
699 case I915_OVERLAY_YUV420:
700 cmd |= OCMD_YUV_420_PLANAR;
701 break;
702 case I915_OVERLAY_YUV411:
703 case I915_OVERLAY_YUV410:
704 cmd |= OCMD_YUV_410_PLANAR;
705 break;
706 }
707 } else { /* YUV packed */
708 switch (params->format & I915_OVERLAY_DEPTH_MASK) {
709 case I915_OVERLAY_YUV422:
710 cmd |= OCMD_YUV_422_PACKED;
711 break;
712 case I915_OVERLAY_YUV411:
713 cmd |= OCMD_YUV_411_PACKED;
714 break;
715 }
716
717 switch (params->format & I915_OVERLAY_SWAP_MASK) {
718 case I915_OVERLAY_NO_SWAP:
719 break;
720 case I915_OVERLAY_UV_SWAP:
721 cmd |= OCMD_UV_SWAP;
722 break;
723 case I915_OVERLAY_Y_SWAP:
724 cmd |= OCMD_Y_SWAP;
725 break;
726 case I915_OVERLAY_Y_AND_UV_SWAP:
727 cmd |= OCMD_Y_AND_UV_SWAP;
728 break;
729 }
730 }
731
732 return cmd;
733}
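/*
 * Example (symbolic; the register bit values are defined elsewhere): a
 * packed YUV 4:2:2 frame with no channel swap yields
 * OCMD_ENABLE | OCMD_BUF_TYPE_FRAME | OCMD_BUFFER0 | OCMD_YUV_422_PACKED.
 */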
734
735int intel_overlay_do_put_image(struct intel_overlay *overlay,
736 struct drm_gem_object *new_bo,
737 struct put_image_params *params)
738{
739 int ret, tmp_width;
740 struct overlay_registers *regs;
741 bool scale_changed = false;
742 struct drm_i915_gem_object *bo_priv = new_bo->driver_private;
743 struct drm_device *dev = overlay->dev;
744
745 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
746 BUG_ON(!mutex_is_locked(&dev->mode_config.mutex));
747 BUG_ON(!overlay);
748
749 ret = intel_overlay_release_old_vid(overlay);
750 if (ret != 0)
751 return ret;
752
753 ret = i915_gem_object_pin(new_bo, PAGE_SIZE);
754 if (ret != 0)
755 return ret;
756
757 ret = i915_gem_object_set_to_gtt_domain(new_bo, 0);
758 if (ret != 0)
759 goto out_unpin;
760
761 if (!overlay->active) {
762 regs = intel_overlay_map_regs_atomic(overlay);
763 if (!regs) {
764 ret = -ENOMEM;
765 goto out_unpin;
766 }
767 regs->OCONFIG = OCONF_CC_OUT_8BIT;
768 if (IS_I965GM(overlay->dev))
769 regs->OCONFIG |= OCONF_CSC_MODE_BT709;
770 regs->OCONFIG |= overlay->crtc->pipe == 0 ?
771 OCONF_PIPE_A : OCONF_PIPE_B;
772 intel_overlay_unmap_regs_atomic(overlay);
773
774 ret = intel_overlay_on(overlay);
775 if (ret != 0)
776 goto out_unpin;
777 }
778
779 regs = intel_overlay_map_regs_atomic(overlay);
780 if (!regs) {
781 ret = -ENOMEM;
782 goto out_unpin;
783 }
784
785 regs->DWINPOS = (params->dst_y << 16) | params->dst_x;
786 regs->DWINSZ = (params->dst_h << 16) | params->dst_w;
787
788 if (params->format & I915_OVERLAY_YUV_PACKED)
789 tmp_width = packed_width_bytes(params->format, params->src_w);
790 else
791 tmp_width = params->src_w;
792
793 regs->SWIDTH = params->src_w;
794 regs->SWIDTHSW = calc_swidthsw(overlay->dev,
795 params->offset_Y, tmp_width);
796 regs->SHEIGHT = params->src_h;
 797	regs->OBUF_0Y = bo_priv->gtt_offset + params->offset_Y;
798 regs->OSTRIDE = params->stride_Y;
799
800 if (params->format & I915_OVERLAY_YUV_PLANAR) {
801 int uv_hscale = uv_hsubsampling(params->format);
802 int uv_vscale = uv_vsubsampling(params->format);
803 u32 tmp_U, tmp_V;
804 regs->SWIDTH |= (params->src_w/uv_hscale) << 16;
805 tmp_U = calc_swidthsw(overlay->dev, params->offset_U,
806 params->src_w/uv_hscale);
807 tmp_V = calc_swidthsw(overlay->dev, params->offset_V,
808 params->src_w/uv_hscale);
809 regs->SWIDTHSW |= max_t(u32, tmp_U, tmp_V) << 16;
810 regs->SHEIGHT |= (params->src_h/uv_vscale) << 16;
811 regs->OBUF_0U = bo_priv->gtt_offset + params->offset_U;
812 regs->OBUF_0V = bo_priv->gtt_offset + params->offset_V;
813 regs->OSTRIDE |= params->stride_UV << 16;
814 }
815
816 scale_changed = update_scaling_factors(overlay, regs, params);
817
818 update_colorkey(overlay, regs);
819
820 regs->OCMD = overlay_cmd_reg(params);
821
822 intel_overlay_unmap_regs_atomic(overlay);
823
824 intel_overlay_continue(overlay, scale_changed);
825
826 overlay->old_vid_bo = overlay->vid_bo;
827 overlay->vid_bo = new_bo->driver_private;
828
829 return 0;
830
831out_unpin:
832 i915_gem_object_unpin(new_bo);
833 return ret;
834}
835
836int intel_overlay_switch_off(struct intel_overlay *overlay)
837{
838 int ret;
839 struct overlay_registers *regs;
840 struct drm_device *dev = overlay->dev;
841
842 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
843 BUG_ON(!mutex_is_locked(&dev->mode_config.mutex));
844
845 if (overlay->hw_wedged) {
846 ret = intel_overlay_recover_from_interrupt(overlay, 1);
847 if (ret != 0)
848 return ret;
849 }
850
851 if (!overlay->active)
852 return 0;
853
854 ret = intel_overlay_release_old_vid(overlay);
855 if (ret != 0)
856 return ret;
857
 858	regs = intel_overlay_map_regs_atomic(overlay);
	if (regs) {
 859		regs->OCMD = 0;
 860		intel_overlay_unmap_regs_atomic(overlay);
	}
861
862 ret = intel_overlay_off(overlay);
863 if (ret != 0)
864 return ret;
865
866 intel_overlay_off_tail(overlay);
867
868 return 0;
869}
870
871static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
872 struct intel_crtc *crtc)
873{
874 drm_i915_private_t *dev_priv = overlay->dev->dev_private;
875 u32 pipeconf;
876 int pipeconf_reg = (crtc->pipe == 0) ? PIPEACONF : PIPEBCONF;
877
878 if (!crtc->base.enabled || crtc->dpms_mode != DRM_MODE_DPMS_ON)
879 return -EINVAL;
880
881 pipeconf = I915_READ(pipeconf_reg);
882
883 /* can't use the overlay with double wide pipe */
884 if (!IS_I965G(overlay->dev) && pipeconf & PIPEACONF_DOUBLE_WIDE)
885 return -EINVAL;
886
887 return 0;
888}
889
890static void update_pfit_vscale_ratio(struct intel_overlay *overlay)
891{
892 struct drm_device *dev = overlay->dev;
893 drm_i915_private_t *dev_priv = dev->dev_private;
894 u32 ratio;
895 u32 pfit_control = I915_READ(PFIT_CONTROL);
896
897 /* XXX: This is not the same logic as in the xorg driver, but more in
898 * line with the intel documentation for the i965 */
899 if (!IS_I965G(dev) && (pfit_control & VERT_AUTO_SCALE)) {
900 ratio = I915_READ(PFIT_AUTO_RATIOS) >> PFIT_VERT_SCALE_SHIFT;
901 } else { /* on i965 use the PGM reg to read out the autoscaler values */
902 ratio = I915_READ(PFIT_PGM_RATIOS);
903 if (IS_I965G(dev))
904 ratio >>= PFIT_VERT_SCALE_SHIFT_965;
905 else
906 ratio >>= PFIT_VERT_SCALE_SHIFT;
907 }
908
909 overlay->pfit_vscale_ratio = ratio;
910}
911
912static int check_overlay_dst(struct intel_overlay *overlay,
913 struct drm_intel_overlay_put_image *rec)
914{
915 struct drm_display_mode *mode = &overlay->crtc->base.mode;
916
917 if ((rec->dst_x < mode->crtc_hdisplay)
918 && (rec->dst_x + rec->dst_width
919 <= mode->crtc_hdisplay)
920 && (rec->dst_y < mode->crtc_vdisplay)
921 && (rec->dst_y + rec->dst_height
922 <= mode->crtc_vdisplay))
923 return 0;
924 else
925 return -EINVAL;
926}
927
928static int check_overlay_scaling(struct put_image_params *rec)
929{
930 u32 tmp;
931
932 /* downscaling limit is 8.0 */
933 tmp = ((rec->src_scan_h << 16) / rec->dst_h) >> 16;
934 if (tmp > 7)
935 return -EINVAL;
936 tmp = ((rec->src_scan_w << 16) / rec->dst_w) >> 16;
937 if (tmp > 7)
938 return -EINVAL;
939
940 return 0;
941}
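/*
 * Example with made-up values: src_scan_h = 1600 scanned into dst_h = 200
 * computes ((1600 << 16) / 200) >> 16 = 8 > 7 and is rejected, so the
 * effective downscaling limit is just under 8.0 in each direction.
 */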
942
943static int check_overlay_src(struct drm_device *dev,
944 struct drm_intel_overlay_put_image *rec,
945 struct drm_gem_object *new_bo)
946{
947 u32 stride_mask;
948 int depth;
949 int uv_hscale = uv_hsubsampling(rec->flags);
950 int uv_vscale = uv_vsubsampling(rec->flags);
951 size_t tmp;
952
953 /* check src dimensions */
954 if (IS_845G(dev) || IS_I830(dev)) {
955 if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY
956 || rec->src_width > IMAGE_MAX_WIDTH_LEGACY)
957 return -EINVAL;
958 } else {
959 if (rec->src_height > IMAGE_MAX_HEIGHT
960 || rec->src_width > IMAGE_MAX_WIDTH)
961 return -EINVAL;
962 }
963 /* better safe than sorry, use 4 as the maximal subsampling ratio */
964 if (rec->src_height < N_VERT_Y_TAPS*4
965 || rec->src_width < N_HORIZ_Y_TAPS*4)
966 return -EINVAL;
967
 968	/* check alignment constraints */
969 switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
970 case I915_OVERLAY_RGB:
971 /* not implemented */
972 return -EINVAL;
973 case I915_OVERLAY_YUV_PACKED:
974 depth = packed_depth_bytes(rec->flags);
975 if (uv_vscale != 1)
976 return -EINVAL;
977 if (depth < 0)
978 return depth;
979 /* ignore UV planes */
980 rec->stride_UV = 0;
981 rec->offset_U = 0;
982 rec->offset_V = 0;
983 /* check pixel alignment */
984 if (rec->offset_Y % depth)
985 return -EINVAL;
986 break;
987 case I915_OVERLAY_YUV_PLANAR:
988 if (uv_vscale < 0 || uv_hscale < 0)
989 return -EINVAL;
990 /* no offset restrictions for planar formats */
991 break;
992 default:
993 return -EINVAL;
994 }
995
996 if (rec->src_width % uv_hscale)
997 return -EINVAL;
998
999 /* stride checking */
1000 stride_mask = 63;
1001
1002 if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask)
1003 return -EINVAL;
1004 if (IS_I965G(dev) && rec->stride_Y < 512)
1005 return -EINVAL;
1006
1007 tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ?
1008 4 : 8;
1009 if (rec->stride_Y > tmp*1024 || rec->stride_UV > 2*1024)
1010 return -EINVAL;
1011
1012 /* check buffer dimensions */
1013 switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
1014 case I915_OVERLAY_RGB:
1015 case I915_OVERLAY_YUV_PACKED:
 1016		/* stride must cover a full row of packed pixels */
1017 if (packed_width_bytes(rec->flags, rec->src_width)
1018 > rec->stride_Y)
1019 return -EINVAL;
1020
1021 tmp = rec->stride_Y*rec->src_height;
1022 if (rec->offset_Y + tmp > new_bo->size)
1023 return -EINVAL;
1024 break;
1025 case I915_OVERLAY_YUV_PLANAR:
1026 if (rec->src_width > rec->stride_Y)
1027 return -EINVAL;
1028 if (rec->src_width/uv_hscale > rec->stride_UV)
1029 return -EINVAL;
1030
1031 tmp = rec->stride_Y*rec->src_height;
1032 if (rec->offset_Y + tmp > new_bo->size)
1033 return -EINVAL;
1034 tmp = rec->stride_UV*rec->src_height;
1035 tmp /= uv_vscale;
1036 if (rec->offset_U + tmp > new_bo->size
1037 || rec->offset_V + tmp > new_bo->size)
1038 return -EINVAL;
1039 break;
1040 }
1041
1042 return 0;
1043}
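/*
 * Summary of the stride rules enforced above: Y and UV strides must be
 * 64-byte aligned (mask 63), i965 additionally requires stride_Y >= 512,
 * and the upper bounds are 4 KiB (planar) or 8 KiB (packed) for Y and
 * 2 KiB for UV.
 */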
1044
1045int intel_overlay_put_image(struct drm_device *dev, void *data,
1046 struct drm_file *file_priv)
1047{
1048 struct drm_intel_overlay_put_image *put_image_rec = data;
1049 drm_i915_private_t *dev_priv = dev->dev_private;
1050 struct intel_overlay *overlay;
1051 struct drm_mode_object *drmmode_obj;
1052 struct intel_crtc *crtc;
1053 struct drm_gem_object *new_bo;
1054 struct put_image_params *params;
1055 int ret;
1056
1057 if (!dev_priv) {
1058 DRM_ERROR("called with no initialization\n");
1059 return -EINVAL;
1060 }
1061
1062 overlay = dev_priv->overlay;
1063 if (!overlay) {
1064 DRM_DEBUG("userspace bug: no overlay\n");
1065 return -ENODEV;
1066 }
1067
1068 if (!(put_image_rec->flags & I915_OVERLAY_ENABLE)) {
1069 mutex_lock(&dev->mode_config.mutex);
1070 mutex_lock(&dev->struct_mutex);
1071
1072 ret = intel_overlay_switch_off(overlay);
1073
1074 mutex_unlock(&dev->struct_mutex);
1075 mutex_unlock(&dev->mode_config.mutex);
1076
1077 return ret;
1078 }
1079
1080 params = kmalloc(sizeof(struct put_image_params), GFP_KERNEL);
1081 if (!params)
1082 return -ENOMEM;
1083
1084 drmmode_obj = drm_mode_object_find(dev, put_image_rec->crtc_id,
1085 DRM_MODE_OBJECT_CRTC);
 1086	if (!drmmode_obj) {
		kfree(params);
 1087		return -ENOENT;
	}
1088 crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
1089
1090 new_bo = drm_gem_object_lookup(dev, file_priv,
1091 put_image_rec->bo_handle);
 1092	if (!new_bo) {
		kfree(params);
 1093		return -ENOENT;
	}
1094
1095 mutex_lock(&dev->mode_config.mutex);
1096 mutex_lock(&dev->struct_mutex);
1097
1098 if (overlay->hw_wedged) {
1099 ret = intel_overlay_recover_from_interrupt(overlay, 1);
1100 if (ret != 0)
1101 goto out_unlock;
1102 }
1103
1104 if (overlay->crtc != crtc) {
1105 struct drm_display_mode *mode = &crtc->base.mode;
1106 ret = intel_overlay_switch_off(overlay);
1107 if (ret != 0)
1108 goto out_unlock;
1109
1110 ret = check_overlay_possible_on_crtc(overlay, crtc);
1111 if (ret != 0)
1112 goto out_unlock;
1113
1114 overlay->crtc = crtc;
1115 crtc->overlay = overlay;
1116
1117 if (intel_panel_fitter_pipe(dev) == crtc->pipe
 1118		/* and line too wide, i.e. one-line-mode */
1119 && mode->hdisplay > 1024) {
1120 overlay->pfit_active = 1;
1121 update_pfit_vscale_ratio(overlay);
1122 } else
1123 overlay->pfit_active = 0;
1124 }
1125
1126 ret = check_overlay_dst(overlay, put_image_rec);
1127 if (ret != 0)
1128 goto out_unlock;
1129
1130 if (overlay->pfit_active) {
1131 params->dst_y = ((((u32)put_image_rec->dst_y) << 12) /
1132 overlay->pfit_vscale_ratio);
1133 /* shifting right rounds downwards, so add 1 */
1134 params->dst_h = ((((u32)put_image_rec->dst_height) << 12) /
1135 overlay->pfit_vscale_ratio) + 1;
1136 } else {
1137 params->dst_y = put_image_rec->dst_y;
1138 params->dst_h = put_image_rec->dst_height;
1139 }
1140 params->dst_x = put_image_rec->dst_x;
1141 params->dst_w = put_image_rec->dst_width;
1142
1143 params->src_w = put_image_rec->src_width;
1144 params->src_h = put_image_rec->src_height;
1145 params->src_scan_w = put_image_rec->src_scan_width;
1146 params->src_scan_h = put_image_rec->src_scan_height;
1147 if (params->src_scan_h > params->src_h
1148 || params->src_scan_w > params->src_w) {
1149 ret = -EINVAL;
1150 goto out_unlock;
1151 }
1152
1153 ret = check_overlay_src(dev, put_image_rec, new_bo);
1154 if (ret != 0)
1155 goto out_unlock;
1156 params->format = put_image_rec->flags & ~I915_OVERLAY_FLAGS_MASK;
1157 params->stride_Y = put_image_rec->stride_Y;
1158 params->stride_UV = put_image_rec->stride_UV;
1159 params->offset_Y = put_image_rec->offset_Y;
1160 params->offset_U = put_image_rec->offset_U;
1161 params->offset_V = put_image_rec->offset_V;
1162
1163 /* Check scaling after src size to prevent a divide-by-zero. */
1164 ret = check_overlay_scaling(params);
1165 if (ret != 0)
1166 goto out_unlock;
1167
1168 ret = intel_overlay_do_put_image(overlay, new_bo, params);
1169 if (ret != 0)
1170 goto out_unlock;
1171
1172 mutex_unlock(&dev->struct_mutex);
1173 mutex_unlock(&dev->mode_config.mutex);
1174
1175 kfree(params);
1176
1177 return 0;
1178
1179out_unlock:
1180 mutex_unlock(&dev->struct_mutex);
1181 mutex_unlock(&dev->mode_config.mutex);
1182 drm_gem_object_unreference(new_bo);
1183 kfree(params);
1184
1185 return ret;
1186}
1187
1188static void update_reg_attrs(struct intel_overlay *overlay,
1189 struct overlay_registers *regs)
1190{
1191 regs->OCLRC0 = (overlay->contrast << 18) | (overlay->brightness & 0xff);
1192 regs->OCLRC1 = overlay->saturation;
1193}
1194
1195static bool check_gamma_bounds(u32 gamma1, u32 gamma2)
1196{
1197 int i;
1198
1199 if (gamma1 & 0xff000000 || gamma2 & 0xff000000)
1200 return false;
1201
1202 for (i = 0; i < 3; i++) {
1203 if (((gamma1 >> i * 8) & 0xff) >= ((gamma2 >> i*8) & 0xff))
1204 return false;
1205 }
1206
1207 return true;
1208}
1209
1210static bool check_gamma5_errata(u32 gamma5)
1211{
1212 int i;
1213
1214 for (i = 0; i < 3; i++) {
1215 if (((gamma5 >> i*8) & 0xff) == 0x80)
1216 return false;
1217 }
1218
1219 return true;
1220}
1221
1222static int check_gamma(struct drm_intel_overlay_attrs *attrs)
1223{
1224 if (!check_gamma_bounds(0, attrs->gamma0)
1225 || !check_gamma_bounds(attrs->gamma0, attrs->gamma1)
1226 || !check_gamma_bounds(attrs->gamma1, attrs->gamma2)
1227 || !check_gamma_bounds(attrs->gamma2, attrs->gamma3)
1228 || !check_gamma_bounds(attrs->gamma3, attrs->gamma4)
1229 || !check_gamma_bounds(attrs->gamma4, attrs->gamma5)
1230 || !check_gamma_bounds(attrs->gamma5, 0x00ffffff))
1231 return -EINVAL;
1232 if (!check_gamma5_errata(attrs->gamma5))
1233 return -EINVAL;
1234 return 0;
1235}
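/*
 * Example of a valid ramp (made-up values): 0x00101010 < 0x00202020 < ...
 * < 0x00f0f0f0 -- each of the three channel bytes must increase strictly
 * from gamma0 through gamma5, the top byte must stay clear, and no byte
 * of gamma5 may be exactly 0x80 (hardware errata).
 */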
1236
1237int intel_overlay_attrs(struct drm_device *dev, void *data,
1238 struct drm_file *file_priv)
1239{
1240 struct drm_intel_overlay_attrs *attrs = data;
1241 drm_i915_private_t *dev_priv = dev->dev_private;
1242 struct intel_overlay *overlay;
1243 struct overlay_registers *regs;
1244 int ret;
1245
1246 if (!dev_priv) {
1247 DRM_ERROR("called with no initialization\n");
1248 return -EINVAL;
1249 }
1250
1251 overlay = dev_priv->overlay;
1252 if (!overlay) {
1253 DRM_DEBUG("userspace bug: no overlay\n");
1254 return -ENODEV;
1255 }
1256
1257 mutex_lock(&dev->mode_config.mutex);
1258 mutex_lock(&dev->struct_mutex);
1259
1260 if (!(attrs->flags & I915_OVERLAY_UPDATE_ATTRS)) {
1261 attrs->color_key = overlay->color_key;
1262 attrs->brightness = overlay->brightness;
1263 attrs->contrast = overlay->contrast;
1264 attrs->saturation = overlay->saturation;
1265
1266 if (IS_I9XX(dev)) {
1267 attrs->gamma0 = I915_READ(OGAMC0);
1268 attrs->gamma1 = I915_READ(OGAMC1);
1269 attrs->gamma2 = I915_READ(OGAMC2);
1270 attrs->gamma3 = I915_READ(OGAMC3);
1271 attrs->gamma4 = I915_READ(OGAMC4);
1272 attrs->gamma5 = I915_READ(OGAMC5);
1273 }
1274 ret = 0;
1275 } else {
1276 overlay->color_key = attrs->color_key;
1277 if (attrs->brightness >= -128 && attrs->brightness <= 127) {
1278 overlay->brightness = attrs->brightness;
1279 } else {
1280 ret = -EINVAL;
1281 goto out_unlock;
1282 }
1283 if (attrs->contrast <= 255) {
1284 overlay->contrast = attrs->contrast;
1285 } else {
1286 ret = -EINVAL;
1287 goto out_unlock;
1288 }
1289 if (attrs->saturation <= 1023) {
1290 overlay->saturation = attrs->saturation;
1291 } else {
1292 ret = -EINVAL;
1293 goto out_unlock;
1294 }
1295
1296 regs = intel_overlay_map_regs_atomic(overlay);
1297 if (!regs) {
1298 ret = -ENOMEM;
1299 goto out_unlock;
1300 }
1301
1302 update_reg_attrs(overlay, regs);
1303
1304 intel_overlay_unmap_regs_atomic(overlay);
1305
1306 if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) {
1307 if (!IS_I9XX(dev)) {
1308 ret = -EINVAL;
1309 goto out_unlock;
1310 }
1311
1312 if (overlay->active) {
1313 ret = -EBUSY;
1314 goto out_unlock;
1315 }
1316
1317 ret = check_gamma(attrs);
1318 if (ret != 0)
1319 goto out_unlock;
1320
1321 I915_WRITE(OGAMC0, attrs->gamma0);
1322 I915_WRITE(OGAMC1, attrs->gamma1);
1323 I915_WRITE(OGAMC2, attrs->gamma2);
1324 I915_WRITE(OGAMC3, attrs->gamma3);
1325 I915_WRITE(OGAMC4, attrs->gamma4);
1326 I915_WRITE(OGAMC5, attrs->gamma5);
1327 }
1328 ret = 0;
1329 }
1330
1331out_unlock:
1332 mutex_unlock(&dev->struct_mutex);
1333 mutex_unlock(&dev->mode_config.mutex);
1334
1335 return ret;
1336}
1337
1338void intel_setup_overlay(struct drm_device *dev)
1339{
1340 drm_i915_private_t *dev_priv = dev->dev_private;
1341 struct intel_overlay *overlay;
1342 struct drm_gem_object *reg_bo;
1343 struct overlay_registers *regs;
1344 int ret;
1345
1346 if (!OVERLAY_EXISTS(dev))
1347 return;
1348
1349 overlay = kzalloc(sizeof(struct intel_overlay), GFP_KERNEL);
1350 if (!overlay)
1351 return;
1352 overlay->dev = dev;
1353
1354 reg_bo = drm_gem_object_alloc(dev, PAGE_SIZE);
1355 if (!reg_bo)
1356 goto out_free;
1357 overlay->reg_bo = reg_bo->driver_private;
1358
1359 if (OVERLAY_NONPHYSICAL(dev)) {
1360 ret = i915_gem_object_pin(reg_bo, PAGE_SIZE);
1361 if (ret) {
1362 DRM_ERROR("failed to pin overlay register bo\n");
1363 goto out_free_bo;
1364 }
1365 overlay->flip_addr = overlay->reg_bo->gtt_offset;
1366 } else {
1367 ret = i915_gem_attach_phys_object(dev, reg_bo,
1368 I915_GEM_PHYS_OVERLAY_REGS);
1369 if (ret) {
1370 DRM_ERROR("failed to attach phys overlay regs\n");
1371 goto out_free_bo;
1372 }
1373 overlay->flip_addr = overlay->reg_bo->phys_obj->handle->busaddr;
1374 }
1375
1376 /* init all values */
1377 overlay->color_key = 0x0101fe;
1378 overlay->brightness = -19;
1379 overlay->contrast = 75;
1380 overlay->saturation = 146;
1381
1382 regs = intel_overlay_map_regs_atomic(overlay);
1383 if (!regs)
1384 goto out_free_bo;
1385
1386 memset(regs, 0, sizeof(struct overlay_registers));
1387 update_polyphase_filter(regs);
1388
1389 update_reg_attrs(overlay, regs);
1390
1391 intel_overlay_unmap_regs_atomic(overlay);
1392
1393 dev_priv->overlay = overlay;
1394 DRM_INFO("initialized overlay support\n");
1395 return;
1396
1397out_free_bo:
1398 drm_gem_object_unreference(reg_bo);
1399out_free:
1400 kfree(overlay);
1401 return;
1402}
1403
1404void intel_cleanup_overlay(struct drm_device *dev)
1405{
1406 drm_i915_private_t *dev_priv = dev->dev_private;
1407
1408 if (dev_priv->overlay) {
 1409		/* The BOs should be freed by the generic code already.
1410 * Furthermore modesetting teardown happens beforehand so the
1411 * hardware should be off already */
1412 BUG_ON(dev_priv->overlay->active);
1413
1414 kfree(dev_priv->overlay);
1415 }
1416}
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index e7fa3279e2f8..24a3dc99716c 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -36,8 +36,6 @@
36#include "i915_drv.h" 36#include "i915_drv.h"
37#include "intel_sdvo_regs.h" 37#include "intel_sdvo_regs.h"
38 38
39#undef SDVO_DEBUG
40
41static char *tv_format_names[] = { 39static char *tv_format_names[] = {
42 "NTSC_M" , "NTSC_J" , "NTSC_443", 40 "NTSC_M" , "NTSC_J" , "NTSC_443",
43 "PAL_B" , "PAL_D" , "PAL_G" , 41 "PAL_B" , "PAL_D" , "PAL_G" ,
@@ -356,7 +354,6 @@ static const struct _sdvo_cmd_name {
356#define SDVO_NAME(dev_priv) ((dev_priv)->output_device == SDVOB ? "SDVOB" : "SDVOC") 354#define SDVO_NAME(dev_priv) ((dev_priv)->output_device == SDVOB ? "SDVOB" : "SDVOC")
357#define SDVO_PRIV(output) ((struct intel_sdvo_priv *) (output)->dev_priv) 355#define SDVO_PRIV(output) ((struct intel_sdvo_priv *) (output)->dev_priv)
358 356
359#ifdef SDVO_DEBUG
360static void intel_sdvo_debug_write(struct intel_output *intel_output, u8 cmd, 357static void intel_sdvo_debug_write(struct intel_output *intel_output, u8 cmd,
361 void *args, int args_len) 358 void *args, int args_len)
362{ 359{
@@ -379,9 +376,6 @@ static void intel_sdvo_debug_write(struct intel_output *intel_output, u8 cmd,
379 DRM_LOG_KMS("(%02X)", cmd); 376 DRM_LOG_KMS("(%02X)", cmd);
380 DRM_LOG_KMS("\n"); 377 DRM_LOG_KMS("\n");
381} 378}
382#else
383#define intel_sdvo_debug_write(o, c, a, l)
384#endif
385 379
386static void intel_sdvo_write_cmd(struct intel_output *intel_output, u8 cmd, 380static void intel_sdvo_write_cmd(struct intel_output *intel_output, u8 cmd,
387 void *args, int args_len) 381 void *args, int args_len)
@@ -398,7 +392,6 @@ static void intel_sdvo_write_cmd(struct intel_output *intel_output, u8 cmd,
398 intel_sdvo_write_byte(intel_output, SDVO_I2C_OPCODE, cmd); 392 intel_sdvo_write_byte(intel_output, SDVO_I2C_OPCODE, cmd);
399} 393}
400 394
401#ifdef SDVO_DEBUG
402static const char *cmd_status_names[] = { 395static const char *cmd_status_names[] = {
403 "Power on", 396 "Power on",
404 "Success", 397 "Success",
@@ -427,9 +420,6 @@ static void intel_sdvo_debug_response(struct intel_output *intel_output,
427 DRM_LOG_KMS("(??? %d)", status); 420 DRM_LOG_KMS("(??? %d)", status);
428 DRM_LOG_KMS("\n"); 421 DRM_LOG_KMS("\n");
429} 422}
430#else
431#define intel_sdvo_debug_response(o, r, l, s)
432#endif
433 423
434static u8 intel_sdvo_read_response(struct intel_output *intel_output, 424static u8 intel_sdvo_read_response(struct intel_output *intel_output,
435 void *response, int response_len) 425 void *response, int response_len)
@@ -1627,6 +1617,10 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect
1627 1617
1628 intel_sdvo_write_cmd(intel_output, 1618 intel_sdvo_write_cmd(intel_output,
1629 SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0); 1619 SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0);
1620 if (sdvo_priv->is_tv) {
1621 /* add 30ms delay when the output type is SDVO-TV */
1622 mdelay(30);
1623 }
1630 status = intel_sdvo_read_response(intel_output, &response, 2); 1624 status = intel_sdvo_read_response(intel_output, &response, 2);
1631 1625
1632 DRM_DEBUG_KMS("SDVO response %d %d\n", response & 0xff, response >> 8); 1626 DRM_DEBUG_KMS("SDVO response %d %d\n", response & 0xff, response >> 8);
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 9ca917931afb..552ec110b741 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1213,20 +1213,17 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
1213 tv_ctl |= TV_TRILEVEL_SYNC; 1213 tv_ctl |= TV_TRILEVEL_SYNC;
1214 if (tv_mode->pal_burst) 1214 if (tv_mode->pal_burst)
1215 tv_ctl |= TV_PAL_BURST; 1215 tv_ctl |= TV_PAL_BURST;
1216
1216 scctl1 = 0; 1217 scctl1 = 0;
1217 /* dda1 implies valid video levels */ 1218 if (tv_mode->dda1_inc)
1218 if (tv_mode->dda1_inc) {
1219 scctl1 |= TV_SC_DDA1_EN; 1219 scctl1 |= TV_SC_DDA1_EN;
1220 }
1221
1222 if (tv_mode->dda2_inc) 1220 if (tv_mode->dda2_inc)
1223 scctl1 |= TV_SC_DDA2_EN; 1221 scctl1 |= TV_SC_DDA2_EN;
1224
1225 if (tv_mode->dda3_inc) 1222 if (tv_mode->dda3_inc)
1226 scctl1 |= TV_SC_DDA3_EN; 1223 scctl1 |= TV_SC_DDA3_EN;
1227
1228 scctl1 |= tv_mode->sc_reset; 1224 scctl1 |= tv_mode->sc_reset;
1229 scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT; 1225 if (video_levels)
1226 scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT;
1230 scctl1 |= tv_mode->dda1_inc << TV_SCDDA1_INC_SHIFT; 1227 scctl1 |= tv_mode->dda1_inc << TV_SCDDA1_INC_SHIFT;
1231 1228
1232 scctl2 = tv_mode->dda2_size << TV_SCDDA2_SIZE_SHIFT | 1229 scctl2 = tv_mode->dda2_size << TV_SCDDA2_SIZE_SHIFT |
@@ -1416,16 +1413,16 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output)
1416 * 0 0 0 Component 1413 * 0 0 0 Component
1417 */ 1414 */
1418 if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) { 1415 if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) {
1419 DRM_DEBUG("Detected Composite TV connection\n"); 1416 DRM_DEBUG_KMS("Detected Composite TV connection\n");
1420 type = DRM_MODE_CONNECTOR_Composite; 1417 type = DRM_MODE_CONNECTOR_Composite;
1421 } else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) { 1418 } else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) {
1422 DRM_DEBUG("Detected S-Video TV connection\n"); 1419 DRM_DEBUG_KMS("Detected S-Video TV connection\n");
1423 type = DRM_MODE_CONNECTOR_SVIDEO; 1420 type = DRM_MODE_CONNECTOR_SVIDEO;
1424 } else if ((tv_dac & TVDAC_SENSE_MASK) == 0) { 1421 } else if ((tv_dac & TVDAC_SENSE_MASK) == 0) {
1425 DRM_DEBUG("Detected Component TV connection\n"); 1422 DRM_DEBUG_KMS("Detected Component TV connection\n");
1426 type = DRM_MODE_CONNECTOR_Component; 1423 type = DRM_MODE_CONNECTOR_Component;
1427 } else { 1424 } else {
1428 DRM_DEBUG("No TV connection detected\n"); 1425 DRM_DEBUG_KMS("No TV connection detected\n");
1429 type = -1; 1426 type = -1;
1430 } 1427 }
1431 1428
@@ -1702,6 +1699,41 @@ static const struct drm_encoder_funcs intel_tv_enc_funcs = {
1702 .destroy = intel_tv_enc_destroy, 1699 .destroy = intel_tv_enc_destroy,
1703}; 1700};
1704 1701
1702/*
1703 * Enumerate the child dev array parsed from VBT to check whether
1704 * the integrated TV is present.
1705 * If it is present, return 1.
1706 * If it is not present, return false.
1707 * If no child dev is parsed from VBT, it assumes that the TV is present.
1708 */
1709static int tv_is_present_in_vbt(struct drm_device *dev)
1710{
1711 struct drm_i915_private *dev_priv = dev->dev_private;
1712 struct child_device_config *p_child;
1713 int i, ret;
1714
1715 if (!dev_priv->child_dev_num)
1716 return 1;
1717
1718 ret = 0;
1719 for (i = 0; i < dev_priv->child_dev_num; i++) {
1720 p_child = dev_priv->child_dev + i;
1721 /*
1722 * If the device type is not TV, continue.
1723 */
1724 if (p_child->device_type != DEVICE_TYPE_INT_TV &&
1725 p_child->device_type != DEVICE_TYPE_TV)
1726 continue;
1727 /* Only when the addin_offset is non-zero, it is regarded
1728 * as present.
1729 */
1730 if (p_child->addin_offset) {
1731 ret = 1;
1732 break;
1733 }
1734 }
1735 return ret;
1736}
1705 1737
1706void 1738void
1707intel_tv_init(struct drm_device *dev) 1739intel_tv_init(struct drm_device *dev)
@@ -1717,6 +1749,10 @@ intel_tv_init(struct drm_device *dev)
1717 if ((I915_READ(TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED) 1749 if ((I915_READ(TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED)
1718 return; 1750 return;
1719 1751
1752 if (!tv_is_present_in_vbt(dev)) {
1753 DRM_DEBUG_KMS("Integrated TV is not present.\n");
1754 return;
1755 }
1720 /* Even if we have an encoder we may not have a connector */ 1756 /* Even if we have an encoder we may not have a connector */
1721 if (!dev_priv->int_tv_support) 1757 if (!dev_priv->int_tv_support)
1722 return; 1758 return;
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index b5713eedd6e1..feb52eee4314 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -49,7 +49,7 @@ radeon-y += radeon_device.o radeon_kms.o \
49 radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \ 49 radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \
50 rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \ 50 rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \
51 r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \ 51 r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
52 r600_blit_kms.o radeon_pm.o 52 r600_blit_kms.o radeon_pm.o atombios_dp.o
53 53
54radeon-$(CONFIG_COMPAT) += radeon_ioc32.o 54radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
55 55
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index d67c42555ab9..6578d19dff93 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -263,10 +263,10 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
263 case ATOM_ARG_FB: 263 case ATOM_ARG_FB:
264 idx = U8(*ptr); 264 idx = U8(*ptr);
265 (*ptr)++; 265 (*ptr)++;
266 val = gctx->scratch[((gctx->fb_base + idx) / 4)];
266 if (print) 267 if (print)
267 DEBUG("FB[0x%02X]", idx); 268 DEBUG("FB[0x%02X]", idx);
268 printk(KERN_INFO "FB access is not implemented.\n"); 269 break;
269 return 0;
270 case ATOM_ARG_IMM: 270 case ATOM_ARG_IMM:
271 switch (align) { 271 switch (align) {
272 case ATOM_SRC_DWORD: 272 case ATOM_SRC_DWORD:
@@ -488,9 +488,9 @@ static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
488 case ATOM_ARG_FB: 488 case ATOM_ARG_FB:
489 idx = U8(*ptr); 489 idx = U8(*ptr);
490 (*ptr)++; 490 (*ptr)++;
491 gctx->scratch[((gctx->fb_base + idx) / 4)] = val;
491 DEBUG("FB[0x%02X]", idx); 492 DEBUG("FB[0x%02X]", idx);
492 printk(KERN_INFO "FB access is not implemented.\n"); 493 break;
493 return;
494 case ATOM_ARG_PLL: 494 case ATOM_ARG_PLL:
495 idx = U8(*ptr); 495 idx = U8(*ptr);
496 (*ptr)++; 496 (*ptr)++;
@@ -1214,3 +1214,28 @@ void atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev,
1214 *crev = CU8(idx + 3); 1214 *crev = CU8(idx + 3);
1215 return; 1215 return;
1216} 1216}
1217
1218int atom_allocate_fb_scratch(struct atom_context *ctx)
1219{
1220 int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware);
1221 uint16_t data_offset;
1222 int usage_bytes;
1223 struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage;
1224
1225 atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset);
1226
1227 firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);
1228
1229 DRM_DEBUG("atom firmware requested %08x %dkb\n",
1230 firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware,
1231 firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb);
1232
1233 usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024;
1234 if (usage_bytes == 0)
1235 usage_bytes = 20 * 1024;
1236 /* allocate some scratch memory */
1237 ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL);
1238 if (!ctx->scratch)
1239 return -ENOMEM;
1240 return 0;
1241}
diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h
index e6eb38f2bcae..6671848e5ea1 100644
--- a/drivers/gpu/drm/radeon/atom.h
+++ b/drivers/gpu/drm/radeon/atom.h
@@ -132,6 +132,7 @@ struct atom_context {
132 uint8_t shift; 132 uint8_t shift;
133 int cs_equal, cs_above; 133 int cs_equal, cs_above;
134 int io_mode; 134 int io_mode;
135 uint32_t *scratch;
135}; 136};
136 137
137extern int atom_debug; 138extern int atom_debug;
@@ -142,6 +143,7 @@ int atom_asic_init(struct atom_context *);
142void atom_destroy(struct atom_context *); 143void atom_destroy(struct atom_context *);
143void atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size, uint8_t *frev, uint8_t *crev, uint16_t *data_start); 144void atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size, uint8_t *frev, uint8_t *crev, uint16_t *data_start);
144void atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t *frev, uint8_t *crev); 145void atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t *frev, uint8_t *crev);
146int atom_allocate_fb_scratch(struct atom_context *ctx);
145#include "atom-types.h" 147#include "atom-types.h"
146#include "atombios.h" 148#include "atombios.h"
147#include "ObjectID.h" 149#include "ObjectID.h"
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index 6643afc36cea..5f48515c77a7 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -2680,7 +2680,7 @@ typedef struct _ATOM_I2C_RECORD {
2680typedef struct _ATOM_HPD_INT_RECORD { 2680typedef struct _ATOM_HPD_INT_RECORD {
2681 ATOM_COMMON_RECORD_HEADER sheader; 2681 ATOM_COMMON_RECORD_HEADER sheader;
2682 UCHAR ucHPDIntGPIOID; /* Corresponding block in GPIO_PIN_INFO table gives the pin info */ 2682 UCHAR ucHPDIntGPIOID; /* Corresponding block in GPIO_PIN_INFO table gives the pin info */
2683 UCHAR ucPluggged_PinState; 2683 UCHAR ucPlugged_PinState;
2684} ATOM_HPD_INT_RECORD; 2684} ATOM_HPD_INT_RECORD;
2685 2685
2686typedef struct _ATOM_OUTPUT_PROTECTION_RECORD { 2686typedef struct _ATOM_OUTPUT_PROTECTION_RECORD {
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index c15287a590ff..260fcf59f00c 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -241,6 +241,7 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
241{ 241{
242 struct drm_device *dev = crtc->dev; 242 struct drm_device *dev = crtc->dev;
243 struct radeon_device *rdev = dev->dev_private; 243 struct radeon_device *rdev = dev->dev_private;
244 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
244 245
245 switch (mode) { 246 switch (mode) {
246 case DRM_MODE_DPMS_ON: 247 case DRM_MODE_DPMS_ON:
@@ -248,20 +249,19 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
248 if (ASIC_IS_DCE3(rdev)) 249 if (ASIC_IS_DCE3(rdev))
249 atombios_enable_crtc_memreq(crtc, 1); 250 atombios_enable_crtc_memreq(crtc, 1);
250 atombios_blank_crtc(crtc, 0); 251 atombios_blank_crtc(crtc, 0);
252 drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
253 radeon_crtc_load_lut(crtc);
251 break; 254 break;
252 case DRM_MODE_DPMS_STANDBY: 255 case DRM_MODE_DPMS_STANDBY:
253 case DRM_MODE_DPMS_SUSPEND: 256 case DRM_MODE_DPMS_SUSPEND:
254 case DRM_MODE_DPMS_OFF: 257 case DRM_MODE_DPMS_OFF:
258 drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
255 atombios_blank_crtc(crtc, 1); 259 atombios_blank_crtc(crtc, 1);
256 if (ASIC_IS_DCE3(rdev)) 260 if (ASIC_IS_DCE3(rdev))
257 atombios_enable_crtc_memreq(crtc, 0); 261 atombios_enable_crtc_memreq(crtc, 0);
258 atombios_enable_crtc(crtc, 0); 262 atombios_enable_crtc(crtc, 0);
259 break; 263 break;
260 } 264 }
261
262 if (mode != DRM_MODE_DPMS_OFF) {
263 radeon_crtc_load_lut(crtc);
264 }
265} 265}
266 266
267static void 267static void
@@ -457,9 +457,8 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
457 if (encoder->encoder_type != 457 if (encoder->encoder_type !=
458 DRM_MODE_ENCODER_DAC) 458 DRM_MODE_ENCODER_DAC)
459 pll_flags |= RADEON_PLL_NO_ODD_POST_DIV; 459 pll_flags |= RADEON_PLL_NO_ODD_POST_DIV;
460 if (!ASIC_IS_AVIVO(rdev) 460 if (encoder->encoder_type ==
461 && (encoder->encoder_type == 461 DRM_MODE_ENCODER_LVDS)
462 DRM_MODE_ENCODER_LVDS))
463 pll_flags |= RADEON_PLL_USE_REF_DIV; 462 pll_flags |= RADEON_PLL_USE_REF_DIV;
464 } 463 }
465 radeon_encoder = to_radeon_encoder(encoder); 464 radeon_encoder = to_radeon_encoder(encoder);
@@ -500,8 +499,18 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
500 else 499 else
501 pll = &rdev->clock.p2pll; 500 pll = &rdev->clock.p2pll;
502 501
503 radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, 502 if (ASIC_IS_AVIVO(rdev)) {
504 &ref_div, &post_div, pll_flags); 503 if (radeon_new_pll)
504 radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock,
505 &fb_div, &frac_fb_div,
506 &ref_div, &post_div, pll_flags);
507 else
508 radeon_compute_pll(pll, adjusted_clock, &pll_clock,
509 &fb_div, &frac_fb_div,
510 &ref_div, &post_div, pll_flags);
511 } else
512 radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
513 &ref_div, &post_div, pll_flags);
505 514
506 index = GetIndexIntoMasterTable(COMMAND, SetPixelClock); 515 index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
507 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, 516 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
@@ -574,21 +583,32 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
574 struct radeon_device *rdev = dev->dev_private; 583 struct radeon_device *rdev = dev->dev_private;
575 struct radeon_framebuffer *radeon_fb; 584 struct radeon_framebuffer *radeon_fb;
576 struct drm_gem_object *obj; 585 struct drm_gem_object *obj;
577 struct drm_radeon_gem_object *obj_priv; 586 struct radeon_bo *rbo;
578 uint64_t fb_location; 587 uint64_t fb_location;
579 uint32_t fb_format, fb_pitch_pixels, tiling_flags; 588 uint32_t fb_format, fb_pitch_pixels, tiling_flags;
589 int r;
580 590
581 if (!crtc->fb) 591 /* no fb bound */
582 return -EINVAL; 592 if (!crtc->fb) {
593 DRM_DEBUG("No FB bound\n");
594 return 0;
595 }
583 596
584 radeon_fb = to_radeon_framebuffer(crtc->fb); 597 radeon_fb = to_radeon_framebuffer(crtc->fb);
585 598
 599 /* Pin framebuffer & get tiling information */
586 obj = radeon_fb->obj; 600 obj = radeon_fb->obj;
587 obj_priv = obj->driver_private; 601 rbo = obj->driver_private;
588 602 r = radeon_bo_reserve(rbo, false);
589 if (radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &fb_location)) { 603 if (unlikely(r != 0))
604 return r;
605 r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location);
606 if (unlikely(r != 0)) {
607 radeon_bo_unreserve(rbo);
590 return -EINVAL; 608 return -EINVAL;
591 } 609 }
610 radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
611 radeon_bo_unreserve(rbo);
592 612
593 switch (crtc->fb->bits_per_pixel) { 613 switch (crtc->fb->bits_per_pixel) {
594 case 8: 614 case 8:
@@ -618,8 +638,6 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
618 return -EINVAL; 638 return -EINVAL;
619 } 639 }
620 640
621 radeon_object_get_tiling_flags(obj->driver_private,
622 &tiling_flags, NULL);
623 if (tiling_flags & RADEON_TILING_MACRO) 641 if (tiling_flags & RADEON_TILING_MACRO)
624 fb_format |= AVIVO_D1GRPH_MACRO_ADDRESS_MODE; 642 fb_format |= AVIVO_D1GRPH_MACRO_ADDRESS_MODE;
625 643
@@ -674,7 +692,12 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
674 692
675 if (old_fb && old_fb != crtc->fb) { 693 if (old_fb && old_fb != crtc->fb) {
676 radeon_fb = to_radeon_framebuffer(old_fb); 694 radeon_fb = to_radeon_framebuffer(old_fb);
677 radeon_gem_object_unpin(radeon_fb->obj); 695 rbo = radeon_fb->obj->driver_private;
696 r = radeon_bo_reserve(rbo, false);
697 if (unlikely(r != 0))
698 return r;
699 radeon_bo_unpin(rbo);
700 radeon_bo_unreserve(rbo);
678 } 701 }
679 702
680 /* Bytes per pixel may have changed */ 703 /* Bytes per pixel may have changed */
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
new file mode 100644
index 000000000000..0d63c4436e7c
--- /dev/null
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -0,0 +1,790 @@
1/*
2 * Copyright 2007-8 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors: Dave Airlie
24 * Alex Deucher
25 */
26#include "drmP.h"
27#include "radeon_drm.h"
28#include "radeon.h"
29
30#include "atom.h"
31#include "atom-bits.h"
32#include "drm_dp_helper.h"
33
34/* move these to drm_dp_helper.c/h */
35#define DP_LINK_CONFIGURATION_SIZE 9
36#define DP_LINK_STATUS_SIZE 6
37#define DP_DPCD_SIZE 8
38
39static char *voltage_names[] = {
40 "0.4V", "0.6V", "0.8V", "1.2V"
41};
42static char *pre_emph_names[] = {
43 "0dB", "3.5dB", "6dB", "9.5dB"
44};
45
46static const int dp_clocks[] = {
 47 54000, /* 1 lane, 1.62 GHz */
 48 90000, /* 1 lane, 2.70 GHz */
 49 108000, /* 2 lane, 1.62 GHz */
 50 180000, /* 2 lane, 2.70 GHz */
 51 216000, /* 4 lane, 1.62 GHz */
 52 360000, /* 4 lane, 2.70 GHz */
53};
54
55static const int num_dp_clocks = sizeof(dp_clocks) / sizeof(int);
56
57/* common helper functions */
58static int dp_lanes_for_mode_clock(u8 dpcd[DP_DPCD_SIZE], int mode_clock)
59{
60 int i;
61 u8 max_link_bw;
62 u8 max_lane_count;
63
64 if (!dpcd)
65 return 0;
66
67 max_link_bw = dpcd[DP_MAX_LINK_RATE];
68 max_lane_count = dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
69
70 switch (max_link_bw) {
71 case DP_LINK_BW_1_62:
72 default:
73 for (i = 0; i < num_dp_clocks; i++) {
74 if (i % 2)
75 continue;
76 switch (max_lane_count) {
77 case 1:
78 if (i > 1)
79 return 0;
80 break;
81 case 2:
82 if (i > 3)
83 return 0;
84 break;
85 case 4:
86 default:
87 break;
88 }
89 if (dp_clocks[i] > mode_clock) {
90 if (i < 2)
91 return 1;
92 else if (i < 4)
93 return 2;
94 else
95 return 4;
96 }
97 }
98 break;
99 case DP_LINK_BW_2_7:
100 for (i = 0; i < num_dp_clocks; i++) {
101 switch (max_lane_count) {
102 case 1:
103 if (i > 1)
104 return 0;
105 break;
106 case 2:
107 if (i > 3)
108 return 0;
109 break;
110 case 4:
111 default:
112 break;
113 }
114 if (dp_clocks[i] > mode_clock) {
115 if (i < 2)
116 return 1;
117 else if (i < 4)
118 return 2;
119 else
120 return 4;
121 }
122 }
123 break;
124 }
125
126 return 0;
127}
128
129static int dp_link_clock_for_mode_clock(u8 dpcd[DP_DPCD_SIZE], int mode_clock)
130{
131 int i;
132 u8 max_link_bw;
133 u8 max_lane_count;
134
135 if (!dpcd)
136 return 0;
137
138 max_link_bw = dpcd[DP_MAX_LINK_RATE];
139 max_lane_count = dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
140
141 switch (max_link_bw) {
142 case DP_LINK_BW_1_62:
143 default:
144 for (i = 0; i < num_dp_clocks; i++) {
145 if (i % 2)
146 continue;
147 switch (max_lane_count) {
148 case 1:
149 if (i > 1)
150 return 0;
151 break;
152 case 2:
153 if (i > 3)
154 return 0;
155 break;
156 case 4:
157 default:
158 break;
159 }
160 if (dp_clocks[i] > mode_clock)
161 return 162000;
162 }
163 break;
164 case DP_LINK_BW_2_7:
165 for (i = 0; i < num_dp_clocks; i++) {
166 switch (max_lane_count) {
167 case 1:
168 if (i > 1)
169 return 0;
170 break;
171 case 2:
172 if (i > 3)
173 return 0;
174 break;
175 case 4:
176 default:
177 break;
178 }
179 if (dp_clocks[i] > mode_clock)
180 return (i % 2) ? 270000 : 162000;
181 }
182 }
183
184 return 0;
185}
186
187int dp_mode_valid(u8 dpcd[DP_DPCD_SIZE], int mode_clock)
188{
189 int lanes = dp_lanes_for_mode_clock(dpcd, mode_clock);
 190 int bw = dp_link_clock_for_mode_clock(dpcd, mode_clock);
191
192 if ((lanes == 0) || (bw == 0))
193 return MODE_CLOCK_HIGH;
194
195 return MODE_OK;
196}
197
198static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r)
199{
200 return link_status[r - DP_LANE0_1_STATUS];
201}
202
203static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE],
204 int lane)
205{
206 int i = DP_LANE0_1_STATUS + (lane >> 1);
207 int s = (lane & 1) * 4;
208 u8 l = dp_link_status(link_status, i);
209 return (l >> s) & 0xf;
210}
211
212static bool dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
213 int lane_count)
214{
215 int lane;
216 u8 lane_status;
217
218 for (lane = 0; lane < lane_count; lane++) {
219 lane_status = dp_get_lane_status(link_status, lane);
220 if ((lane_status & DP_LANE_CR_DONE) == 0)
221 return false;
222 }
223 return true;
224}
225
226static bool dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
227 int lane_count)
228{
229 u8 lane_align;
230 u8 lane_status;
231 int lane;
232
233 lane_align = dp_link_status(link_status,
234 DP_LANE_ALIGN_STATUS_UPDATED);
235 if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
236 return false;
237 for (lane = 0; lane < lane_count; lane++) {
238 lane_status = dp_get_lane_status(link_status, lane);
239 if ((lane_status & DP_CHANNEL_EQ_BITS) != DP_CHANNEL_EQ_BITS)
240 return false;
241 }
242 return true;
243}
244
245static u8 dp_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE],
246 int lane)
247
248{
249 int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
250 int s = ((lane & 1) ?
251 DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
252 DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
253 u8 l = dp_link_status(link_status, i);
254
255 return ((l >> s) & 0x3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
256}
257
258static u8 dp_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZE],
259 int lane)
260{
261 int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
262 int s = ((lane & 1) ?
263 DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
264 DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
265 u8 l = dp_link_status(link_status, i);
266
267 return ((l >> s) & 0x3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
268}
269
270/* XXX fix me -- chip specific */
271#define DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_1200
272static u8 dp_pre_emphasis_max(u8 voltage_swing)
273{
274 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
275 case DP_TRAIN_VOLTAGE_SWING_400:
276 return DP_TRAIN_PRE_EMPHASIS_6;
277 case DP_TRAIN_VOLTAGE_SWING_600:
278 return DP_TRAIN_PRE_EMPHASIS_6;
279 case DP_TRAIN_VOLTAGE_SWING_800:
280 return DP_TRAIN_PRE_EMPHASIS_3_5;
281 case DP_TRAIN_VOLTAGE_SWING_1200:
282 default:
283 return DP_TRAIN_PRE_EMPHASIS_0;
284 }
285}
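
Voltage swing and pre-emphasis share the transmitter's drive budget, so the higher the swing level, the less pre-emphasis headroom remains; the switch above encodes that trade-off for this hardware. A tiny standalone restatement of the same table:

#include <stdio.h>

int main(void)
{
	/* same mapping as dp_pre_emphasis_max() above */
	static const char *swing_mv[]  = { "400", "600", "800", "1200" };
	static const char *max_pe_db[] = { "6.0", "6.0", "3.5", "0.0" };
	int level;

	for (level = 0; level < 4; level++)
		printf("swing %4s mV -> max pre-emphasis %s dB\n",
		       swing_mv[level], max_pe_db[level]);
	return 0;
}
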
286
287static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE],
288 int lane_count,
289 u8 train_set[4])
290{
291 u8 v = 0;
292 u8 p = 0;
293 int lane;
294
295 for (lane = 0; lane < lane_count; lane++) {
296 u8 this_v = dp_get_adjust_request_voltage(link_status, lane);
297 u8 this_p = dp_get_adjust_request_pre_emphasis(link_status, lane);
298
299 DRM_DEBUG("requested signal parameters: lane %d voltage %s pre_emph %s\n",
300 lane,
301 voltage_names[this_v >> DP_TRAIN_VOLTAGE_SWING_SHIFT],
302 pre_emph_names[this_p >> DP_TRAIN_PRE_EMPHASIS_SHIFT]);
303
304 if (this_v > v)
305 v = this_v;
306 if (this_p > p)
307 p = this_p;
308 }
309
310 if (v >= DP_VOLTAGE_MAX)
311 v = DP_VOLTAGE_MAX | DP_TRAIN_MAX_SWING_REACHED;
312
313 if (p >= dp_pre_emphasis_max(v))
314 p = dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
315
316 DRM_DEBUG("using signal parameters: voltage %s pre_emph %s\n",
317 voltage_names[(v & DP_TRAIN_VOLTAGE_SWING_MASK) >> DP_TRAIN_VOLTAGE_SWING_SHIFT],
318 pre_emph_names[(p & DP_TRAIN_PRE_EMPHASIS_MASK) >> DP_TRAIN_PRE_EMPHASIS_SHIFT]);
319
320 for (lane = 0; lane < 4; lane++)
321 train_set[lane] = v | p;
322}
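
Note that the training set is applied per link, not per lane: the loop takes the worst-case (maximum) request across all lanes, clamps it to the hardware maximum, and sets the MAX_*_REACHED flags so the sink stops escalating. A standalone sketch of just the swing clamp, using the standard DPCD TRAINING_LANEx_SET bit layout (the per-lane requests are hypothetical):

#include <stdio.h>
#include <stdint.h>

#define SWING_MASK        0x3
#define SWING_MAX         0x3      /* 1200 mV, mirrors DP_VOLTAGE_MAX above */
#define MAX_SWING_REACHED (1 << 2) /* DP_TRAIN_MAX_SWING_REACHED */

int main(void)
{
	uint8_t req[4] = { 1, 3, 2, 0 };  /* hypothetical per-lane swing requests */
	uint8_t v = 0;
	int lane;

	/* worst lane wins: the link is driven at one common level */
	for (lane = 0; lane < 4; lane++)
		if ((req[lane] & SWING_MASK) > v)
			v = req[lane] & SWING_MASK;

	/* clamp and flag, so the sink knows not to ask for more */
	if (v >= SWING_MAX)
		v = SWING_MAX | MAX_SWING_REACHED;

	printf("train_set swing field: 0x%x\n", v);
	return 0;
}
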
323
324
325/* radeon aux chan functions */
326bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
327 int num_bytes, u8 *read_byte,
328 u8 read_buf_len, u8 delay)
329{
330 struct drm_device *dev = chan->dev;
331 struct radeon_device *rdev = dev->dev_private;
332 PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION args;
333 int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
334 unsigned char *base;
335
336 memset(&args, 0, sizeof(args));
337
338 base = (unsigned char *)rdev->mode_info.atom_context->scratch;
339
340 memcpy(base, req_bytes, num_bytes);
341
342 args.lpAuxRequest = 0;
343 args.lpDataOut = 16;
344 args.ucDataOutLen = 0;
345 args.ucChannelID = chan->rec.i2c_id;
346 args.ucDelay = delay / 10;
347
348 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
349
350 if (args.ucReplyStatus) {
351 DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x\n",
352 req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3],
353 chan->rec.i2c_id, args.ucReplyStatus);
354 return false;
355 }
356
357 if (args.ucDataOutLen && read_byte && read_buf_len) {
358 if (read_buf_len < args.ucDataOutLen) {
 359			DRM_ERROR("Buffer too small for return answer %d %d\n",
360 read_buf_len, args.ucDataOutLen);
361 return false;
362 }
 363		/* length was validated above, so copy the reply directly */
 364		memcpy(read_byte, base + 16, args.ucDataOutLen);
367 }
368 return true;
369}
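
The scratch-buffer protocol here is implicit in the hard-coded offsets: the request bytes are copied to offset 0 (args.lpAuxRequest = 0), the reply comes back at offset 16 (args.lpDataOut = 16), and ucDelay is expressed in 10 µs units. An illustrative view of that layout as a struct (a documentation aid only; AtomBIOS defines no such type):

struct aux_scratch_layout {
	uint8_t request[16];   /* offset 0: AUX header plus write payload */
	uint8_t reply[16];     /* offset 16: read payload, ucDataOutLen bytes */
};
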
370
371bool radeon_dp_aux_native_write(struct radeon_connector *radeon_connector, uint16_t address,
372 uint8_t send_bytes, uint8_t *send)
373{
374 struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
375 u8 msg[20];
376 u8 msg_len, dp_msg_len;
377 bool ret;
378
379 dp_msg_len = 4;
380 msg[0] = address;
381 msg[1] = address >> 8;
382 msg[2] = AUX_NATIVE_WRITE << 4;
383 dp_msg_len += send_bytes;
384 msg[3] = (dp_msg_len << 4) | (send_bytes - 1);
385
386 if (send_bytes > 16)
387 return false;
388
389 memcpy(&msg[4], send, send_bytes);
390 msg_len = 4 + send_bytes;
391 ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, msg, msg_len, NULL, 0, 0);
392 return ret;
393}
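
Every native AUX request starts with the same 4-byte header: address low byte, address high byte, the command in the top nibble of byte 2, and length information in byte 3 (this driver packs its total message length in the high nibble and the DP-style byte count minus one in the low nibble). A standalone sketch of the header construction, using the kernel's AUX_NATIVE_WRITE value and DP_SET_POWER (0x600) as the example address:

#include <stdio.h>
#include <stdint.h>

#define AUX_NATIVE_WRITE 0x8   /* kernel's native-write command value */
#define DP_SET_POWER     0x600

static void build_write_header(uint8_t msg[4], uint16_t address,
			       uint8_t send_bytes)
{
	uint8_t dp_msg_len = 4 + send_bytes;   /* as computed above */

	msg[0] = address & 0xff;
	msg[1] = address >> 8;
	msg[2] = AUX_NATIVE_WRITE << 4;
	msg[3] = (uint8_t)((dp_msg_len << 4) | (send_bytes - 1));
}

int main(void)
{
	uint8_t msg[4];

	build_write_header(msg, DP_SET_POWER, 1);   /* 1-byte power write */
	printf("header: %02x %02x %02x %02x\n", msg[0], msg[1], msg[2], msg[3]);
	return 0;
}
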
394
395bool radeon_dp_aux_native_read(struct radeon_connector *radeon_connector, uint16_t address,
396 uint8_t delay, uint8_t expected_bytes,
397 uint8_t *read_p)
398{
399 struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
400 u8 msg[20];
401 u8 msg_len, dp_msg_len;
402 bool ret = false;
403 msg_len = 4;
404 dp_msg_len = 4;
405 msg[0] = address;
406 msg[1] = address >> 8;
407 msg[2] = AUX_NATIVE_READ << 4;
408 msg[3] = (dp_msg_len) << 4;
409 msg[3] |= expected_bytes - 1;
410
411 ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, msg, msg_len, read_p, expected_bytes, delay);
412 return ret;
413}
414
415/* radeon dp functions */
416static u8 radeon_dp_encoder_service(struct radeon_device *rdev, int action, int dp_clock,
417 uint8_t ucconfig, uint8_t lane_num)
418{
419 DP_ENCODER_SERVICE_PARAMETERS args;
420 int index = GetIndexIntoMasterTable(COMMAND, DPEncoderService);
421
422 memset(&args, 0, sizeof(args));
423 args.ucLinkClock = dp_clock / 10;
424 args.ucConfig = ucconfig;
425 args.ucAction = action;
426 args.ucLaneNum = lane_num;
427 args.ucStatus = 0;
428
429 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
430 return args.ucStatus;
431}
432
433u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector)
434{
435 struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
436 struct drm_device *dev = radeon_connector->base.dev;
437 struct radeon_device *rdev = dev->dev_private;
438
439 return radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_GET_SINK_TYPE, 0,
440 dig_connector->dp_i2c_bus->rec.i2c_id, 0);
441}
442
443bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
444{
445 struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
446 u8 msg[25];
447 int ret;
448
449 ret = radeon_dp_aux_native_read(radeon_connector, DP_DPCD_REV, 0, 8, msg);
450 if (ret) {
451 memcpy(dig_connector->dpcd, msg, 8);
 452		DRM_DEBUG("DPCD: %02x %02x %02x %02x %02x %02x %02x %02x\n",
 453			  msg[0], msg[1], msg[2], msg[3],
 454			  msg[4], msg[5], msg[6], msg[7]);
459 return true;
460 }
461 dig_connector->dpcd[0] = 0;
462 return false;
463}
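
The first few DPCD bytes cached here are enough to configure the link: byte 0 is the DPCD revision (major in the high nibble, minor in the low), byte 1 the maximum link rate in units of 0.27 GHz, and byte 2 the maximum lane count plus capability flags. A standalone decode of a plausible dump (the sample values are hypothetical):

#include <stdio.h>
#include <stdint.h>

#define DP_MAX_LANE_COUNT_MASK 0x1f

int main(void)
{
	/* hypothetical first DPCD bytes from a DP 1.1, 2.7 GHz, 4-lane sink */
	uint8_t dpcd[8] = { 0x11, 0x0a, 0x84, 0x01, 0, 0, 0, 0 };

	printf("DPCD rev %d.%d, max link clock %d kHz, max lanes %d\n",
	       dpcd[0] >> 4, dpcd[0] & 0xf,
	       dpcd[1] * 27000,            /* 0x06 -> 162000, 0x0a -> 270000 */
	       dpcd[2] & DP_MAX_LANE_COUNT_MASK);
	return 0;
}
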
464
465void radeon_dp_set_link_config(struct drm_connector *connector,
466 struct drm_display_mode *mode)
467{
468 struct radeon_connector *radeon_connector;
469 struct radeon_connector_atom_dig *dig_connector;
470
471 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
472 return;
473
474 radeon_connector = to_radeon_connector(connector);
475 if (!radeon_connector->con_priv)
476 return;
477 dig_connector = radeon_connector->con_priv;
478
479 dig_connector->dp_clock =
480 dp_link_clock_for_mode_clock(dig_connector->dpcd, mode->clock);
481 dig_connector->dp_lane_count =
482 dp_lanes_for_mode_clock(dig_connector->dpcd, mode->clock);
483}
484
485int radeon_dp_mode_valid_helper(struct radeon_connector *radeon_connector,
486 struct drm_display_mode *mode)
487{
488 struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
489
490 return dp_mode_valid(dig_connector->dpcd, mode->clock);
491}
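
The helper above defers to the precomputed dp_clocks[] table, but the underlying arithmetic is simple: with 8b/10b coding each lane carries one byte per link-clock symbol, so at 24 bpp a mode needs roughly pixel_clock * 3 / link_clock lanes. A back-of-the-envelope check for 1080p60 (a sketch; it assumes 24 bpp and ignores blanking and overhead details):

#include <stdio.h>

int main(void)
{
	int mode_clock = 148500;   /* 1080p60 pixel clock, kHz */
	int link_clock = 270000;   /* DP_LINK_BW_2_7 symbol clock, kHz */

	/* 3 bytes/pixel at 24 bpp, 1 byte/symbol/lane after 8b/10b */
	int lanes = (mode_clock * 3 + link_clock - 1) / link_clock;

	printf("1080p60 over a 2.7 GHz link needs %d lane(s)\n", lanes);
	return 0;
}
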
492
493static bool atom_dp_get_link_status(struct radeon_connector *radeon_connector,
494 u8 link_status[DP_LINK_STATUS_SIZE])
495{
496 int ret;
497 ret = radeon_dp_aux_native_read(radeon_connector, DP_LANE0_1_STATUS, 100,
498 DP_LINK_STATUS_SIZE, link_status);
499 if (!ret) {
500 DRM_ERROR("displayport link status failed\n");
501 return false;
502 }
503
504 DRM_DEBUG("link status %02x %02x %02x %02x %02x %02x\n",
505 link_status[0], link_status[1], link_status[2],
506 link_status[3], link_status[4], link_status[5]);
507 return true;
508}
509
510bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector)
511{
512 struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
513 u8 link_status[DP_LINK_STATUS_SIZE];
514
515 if (!atom_dp_get_link_status(radeon_connector, link_status))
516 return false;
517 if (dp_channel_eq_ok(link_status, dig_connector->dp_lane_count))
518 return false;
519 return true;
520}
521
522static void dp_set_power(struct radeon_connector *radeon_connector, u8 power_state)
523{
524 struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
525
526 if (dig_connector->dpcd[0] >= 0x11) {
527 radeon_dp_aux_native_write(radeon_connector, DP_SET_POWER, 1,
528 &power_state);
529 }
530}
531
532static void dp_set_downspread(struct radeon_connector *radeon_connector, u8 downspread)
533{
534 radeon_dp_aux_native_write(radeon_connector, DP_DOWNSPREAD_CTRL, 1,
535 &downspread);
536}
537
538static void dp_set_link_bw_lanes(struct radeon_connector *radeon_connector,
539 u8 link_configuration[DP_LINK_CONFIGURATION_SIZE])
540{
541 radeon_dp_aux_native_write(radeon_connector, DP_LINK_BW_SET, 2,
542 link_configuration);
543}
544
545static void dp_update_dpvs_emph(struct radeon_connector *radeon_connector,
546 struct drm_encoder *encoder,
547 u8 train_set[4])
548{
549 struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
550 int i;
551
552 for (i = 0; i < dig_connector->dp_lane_count; i++)
553 atombios_dig_transmitter_setup(encoder,
554 ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH,
555 i, train_set[i]);
556
557 radeon_dp_aux_native_write(radeon_connector, DP_TRAINING_LANE0_SET,
558 dig_connector->dp_lane_count, train_set);
559}
560
561static void dp_set_training(struct radeon_connector *radeon_connector,
562 u8 training)
563{
564 radeon_dp_aux_native_write(radeon_connector, DP_TRAINING_PATTERN_SET,
565 1, &training);
566}
567
568void dp_link_train(struct drm_encoder *encoder,
569 struct drm_connector *connector)
570{
571 struct drm_device *dev = encoder->dev;
572 struct radeon_device *rdev = dev->dev_private;
573 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
574 struct radeon_encoder_atom_dig *dig;
575 struct radeon_connector *radeon_connector;
576 struct radeon_connector_atom_dig *dig_connector;
577 int enc_id = 0;
578 bool clock_recovery, channel_eq;
579 u8 link_status[DP_LINK_STATUS_SIZE];
580 u8 link_configuration[DP_LINK_CONFIGURATION_SIZE];
581 u8 tries, voltage;
582 u8 train_set[4];
583 int i;
584
585 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
586 return;
587
588 if (!radeon_encoder->enc_priv)
589 return;
590 dig = radeon_encoder->enc_priv;
591
592 radeon_connector = to_radeon_connector(connector);
593 if (!radeon_connector->con_priv)
594 return;
595 dig_connector = radeon_connector->con_priv;
596
597 if (ASIC_IS_DCE32(rdev)) {
598 if (dig->dig_block)
599 enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER;
600 else
601 enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER;
602 if (dig_connector->linkb)
603 enc_id |= ATOM_DP_CONFIG_LINK_B;
604 else
605 enc_id |= ATOM_DP_CONFIG_LINK_A;
606 } else {
607 if (dig_connector->linkb)
608 enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER | ATOM_DP_CONFIG_LINK_B;
609 else
610 enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER | ATOM_DP_CONFIG_LINK_A;
611 }
612
613 memset(link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
614 if (dig_connector->dp_clock == 270000)
615 link_configuration[0] = DP_LINK_BW_2_7;
616 else
617 link_configuration[0] = DP_LINK_BW_1_62;
618 link_configuration[1] = dig_connector->dp_lane_count;
619 if (dig_connector->dpcd[0] >= 0x11)
620 link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
621
622 /* power up the sink */
623 dp_set_power(radeon_connector, DP_SET_POWER_D0);
624 /* disable the training pattern on the sink */
625 dp_set_training(radeon_connector, DP_TRAINING_PATTERN_DISABLE);
626 /* set link bw and lanes on the sink */
627 dp_set_link_bw_lanes(radeon_connector, link_configuration);
628 /* disable downspread on the sink */
629 dp_set_downspread(radeon_connector, 0);
630 /* start training on the source */
631 radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_START,
632 dig_connector->dp_clock, enc_id, 0);
633 /* set training pattern 1 on the source */
634 radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
635 dig_connector->dp_clock, enc_id, 0);
636
637 /* set initial vs/emph */
638 memset(train_set, 0, 4);
639 udelay(400);
640 /* set training pattern 1 on the sink */
641 dp_set_training(radeon_connector, DP_TRAINING_PATTERN_1);
642
643 dp_update_dpvs_emph(radeon_connector, encoder, train_set);
644
645 /* clock recovery loop */
646 clock_recovery = false;
647 tries = 0;
648 voltage = 0xff;
649 for (;;) {
650 udelay(100);
651 if (!atom_dp_get_link_status(radeon_connector, link_status))
652 break;
653
654 if (dp_clock_recovery_ok(link_status, dig_connector->dp_lane_count)) {
655 clock_recovery = true;
656 break;
657 }
658
659 for (i = 0; i < dig_connector->dp_lane_count; i++) {
660 if ((train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
661 break;
662 }
663 if (i == dig_connector->dp_lane_count) {
664 DRM_ERROR("clock recovery reached max voltage\n");
665 break;
666 }
667
668 if ((train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
669 ++tries;
670 if (tries == 5) {
671 DRM_ERROR("clock recovery tried 5 times\n");
672 break;
673 }
674 } else
675 tries = 0;
676
677 voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
678
679 /* Compute new train_set as requested by sink */
680 dp_get_adjust_train(link_status, dig_connector->dp_lane_count, train_set);
681 dp_update_dpvs_emph(radeon_connector, encoder, train_set);
682 }
683 if (!clock_recovery)
684 DRM_ERROR("clock recovery failed\n");
685 else
686 DRM_DEBUG("clock recovery at voltage %d pre-emphasis %d\n",
687 train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK,
688 (train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK) >>
689 DP_TRAIN_PRE_EMPHASIS_SHIFT);
690
691
692 /* set training pattern 2 on the sink */
693 dp_set_training(radeon_connector, DP_TRAINING_PATTERN_2);
694 /* set training pattern 2 on the source */
695 radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
696 dig_connector->dp_clock, enc_id, 1);
697
698 /* channel equalization loop */
699 tries = 0;
700 channel_eq = false;
701 for (;;) {
702 udelay(400);
703 if (!atom_dp_get_link_status(radeon_connector, link_status))
704 break;
705
706 if (dp_channel_eq_ok(link_status, dig_connector->dp_lane_count)) {
707 channel_eq = true;
708 break;
709 }
710
 711		/* give up after more than 5 adjustment attempts */
 712		if (tries > 5) {
 713			DRM_ERROR("channel eq failed: more than 5 tries\n");
714 break;
715 }
716
717 /* Compute new train_set as requested by sink */
718 dp_get_adjust_train(link_status, dig_connector->dp_lane_count, train_set);
719 dp_update_dpvs_emph(radeon_connector, encoder, train_set);
720
721 tries++;
722 }
723
724 if (!channel_eq)
725 DRM_ERROR("channel eq failed\n");
726 else
727 DRM_DEBUG("channel eq at voltage %d pre-emphasis %d\n",
728 train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK,
729 (train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK)
730 >> DP_TRAIN_PRE_EMPHASIS_SHIFT);
731
732 /* disable the training pattern on the sink */
733 dp_set_training(radeon_connector, DP_TRAINING_PATTERN_DISABLE);
734
735 radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE,
736 dig_connector->dp_clock, enc_id, 0);
737}
738
739int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
740 uint8_t write_byte, uint8_t *read_byte)
741{
742 struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
743 struct radeon_i2c_chan *auxch = (struct radeon_i2c_chan *)adapter;
744 int ret = 0;
745 uint16_t address = algo_data->address;
746 uint8_t msg[5];
747 uint8_t reply[2];
748 int msg_len, dp_msg_len;
749 int reply_bytes;
750
751 /* Set up the command byte */
752 if (mode & MODE_I2C_READ)
753 msg[2] = AUX_I2C_READ << 4;
754 else
755 msg[2] = AUX_I2C_WRITE << 4;
756
757 if (!(mode & MODE_I2C_STOP))
758 msg[2] |= AUX_I2C_MOT << 4;
759
760 msg[0] = address;
761 msg[1] = address >> 8;
762
763 reply_bytes = 1;
764
765 msg_len = 4;
766 dp_msg_len = 3;
767 switch (mode) {
768 case MODE_I2C_WRITE:
769 msg[4] = write_byte;
770 msg_len++;
771 dp_msg_len += 2;
772 break;
773 case MODE_I2C_READ:
774 dp_msg_len += 1;
775 break;
776 default:
777 break;
778 }
779
780 msg[3] = (dp_msg_len) << 4;
781 ret = radeon_process_aux_ch(auxch, msg, msg_len, reply, reply_bytes, 0);
782
783 if (ret) {
784 if (read_byte)
785 *read_byte = reply[0];
786 return reply_bytes;
787 }
788 return -EREMOTEIO;
789}
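
For I2C-over-AUX, the MOT (Middle-Of-Transaction) bit is what strings several one-byte AUX transfers into a single logical I2C transaction: it stays set until the caller signals STOP. A standalone sketch of the command-byte logic above (the MODE_I2C_* flag values below are illustrative, not the drm helper's actual definitions):

#include <stdio.h>
#include <stdint.h>

#define AUX_I2C_WRITE 0x0
#define AUX_I2C_READ  0x1
#define AUX_I2C_MOT   0x4

/* illustrative flag values; the drm helper defines its own */
#define MODE_I2C_READ (1 << 0)
#define MODE_I2C_STOP (1 << 1)

static uint8_t command_byte(int mode)
{
	uint8_t cmd = ((mode & MODE_I2C_READ) ? AUX_I2C_READ : AUX_I2C_WRITE) << 4;

	if (!(mode & MODE_I2C_STOP))
		cmd |= AUX_I2C_MOT << 4;   /* middle of transaction: keep bus open */
	return cmd;
}

int main(void)
{
	printf("read, continue: 0x%02x\n", command_byte(MODE_I2C_READ));
	printf("read, stop:     0x%02x\n",
	       command_byte(MODE_I2C_READ | MODE_I2C_STOP));
	return 0;
}
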
790
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index c9e93eabcf16..824cc6480a06 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -65,6 +65,95 @@ MODULE_FIRMWARE(FIRMWARE_R520);
65 * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 65 * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
66 */ 66 */
67 67
68/* hpd for digital panel detect/disconnect */
69bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
70{
71 bool connected = false;
72
73 switch (hpd) {
74 case RADEON_HPD_1:
75 if (RREG32(RADEON_FP_GEN_CNTL) & RADEON_FP_DETECT_SENSE)
76 connected = true;
77 break;
78 case RADEON_HPD_2:
79 if (RREG32(RADEON_FP2_GEN_CNTL) & RADEON_FP2_DETECT_SENSE)
80 connected = true;
81 break;
82 default:
83 break;
84 }
85 return connected;
86}
87
88void r100_hpd_set_polarity(struct radeon_device *rdev,
89 enum radeon_hpd_id hpd)
90{
91 u32 tmp;
92 bool connected = r100_hpd_sense(rdev, hpd);
93
94 switch (hpd) {
95 case RADEON_HPD_1:
96 tmp = RREG32(RADEON_FP_GEN_CNTL);
97 if (connected)
98 tmp &= ~RADEON_FP_DETECT_INT_POL;
99 else
100 tmp |= RADEON_FP_DETECT_INT_POL;
101 WREG32(RADEON_FP_GEN_CNTL, tmp);
102 break;
103 case RADEON_HPD_2:
104 tmp = RREG32(RADEON_FP2_GEN_CNTL);
105 if (connected)
106 tmp &= ~RADEON_FP2_DETECT_INT_POL;
107 else
108 tmp |= RADEON_FP2_DETECT_INT_POL;
109 WREG32(RADEON_FP2_GEN_CNTL, tmp);
110 break;
111 default:
112 break;
113 }
114}
115
116void r100_hpd_init(struct radeon_device *rdev)
117{
118 struct drm_device *dev = rdev->ddev;
119 struct drm_connector *connector;
120
121 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
122 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
123 switch (radeon_connector->hpd.hpd) {
124 case RADEON_HPD_1:
125 rdev->irq.hpd[0] = true;
126 break;
127 case RADEON_HPD_2:
128 rdev->irq.hpd[1] = true;
129 break;
130 default:
131 break;
132 }
133 }
134 r100_irq_set(rdev);
135}
136
137void r100_hpd_fini(struct radeon_device *rdev)
138{
139 struct drm_device *dev = rdev->ddev;
140 struct drm_connector *connector;
141
142 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
143 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
144 switch (radeon_connector->hpd.hpd) {
145 case RADEON_HPD_1:
146 rdev->irq.hpd[0] = false;
147 break;
148 case RADEON_HPD_2:
149 rdev->irq.hpd[1] = false;
150 break;
151 default:
152 break;
153 }
154 }
155}
156
68/* 157/*
69 * PCI GART 158 * PCI GART
70 */ 159 */
@@ -94,6 +183,15 @@ int r100_pci_gart_init(struct radeon_device *rdev)
94 return radeon_gart_table_ram_alloc(rdev); 183 return radeon_gart_table_ram_alloc(rdev);
95} 184}
96 185
186/* required on r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */
187void r100_enable_bm(struct radeon_device *rdev)
188{
189 uint32_t tmp;
190 /* Enable bus mastering */
191 tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
192 WREG32(RADEON_BUS_CNTL, tmp);
193}
194
97int r100_pci_gart_enable(struct radeon_device *rdev) 195int r100_pci_gart_enable(struct radeon_device *rdev)
98{ 196{
99 uint32_t tmp; 197 uint32_t tmp;
@@ -105,9 +203,6 @@ int r100_pci_gart_enable(struct radeon_device *rdev)
105 WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_location); 203 WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_location);
106 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; 204 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
107 WREG32(RADEON_AIC_HI_ADDR, tmp); 205 WREG32(RADEON_AIC_HI_ADDR, tmp);
108 /* Enable bus mastering */
109 tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
110 WREG32(RADEON_BUS_CNTL, tmp);
111 /* set PCI GART page-table base address */ 206 /* set PCI GART page-table base address */
112 WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr); 207 WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr);
113 tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN; 208 tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
@@ -157,6 +252,12 @@ int r100_irq_set(struct radeon_device *rdev)
157 if (rdev->irq.crtc_vblank_int[1]) { 252 if (rdev->irq.crtc_vblank_int[1]) {
158 tmp |= RADEON_CRTC2_VBLANK_MASK; 253 tmp |= RADEON_CRTC2_VBLANK_MASK;
159 } 254 }
255 if (rdev->irq.hpd[0]) {
256 tmp |= RADEON_FP_DETECT_MASK;
257 }
258 if (rdev->irq.hpd[1]) {
259 tmp |= RADEON_FP2_DETECT_MASK;
260 }
160 WREG32(RADEON_GEN_INT_CNTL, tmp); 261 WREG32(RADEON_GEN_INT_CNTL, tmp);
161 return 0; 262 return 0;
162} 263}
@@ -175,8 +276,9 @@ void r100_irq_disable(struct radeon_device *rdev)
175static inline uint32_t r100_irq_ack(struct radeon_device *rdev) 276static inline uint32_t r100_irq_ack(struct radeon_device *rdev)
176{ 277{
177 uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS); 278 uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS);
178 uint32_t irq_mask = RADEON_SW_INT_TEST | RADEON_CRTC_VBLANK_STAT | 279 uint32_t irq_mask = RADEON_SW_INT_TEST |
179 RADEON_CRTC2_VBLANK_STAT; 280 RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT |
281 RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT;
180 282
181 if (irqs) { 283 if (irqs) {
182 WREG32(RADEON_GEN_INT_STATUS, irqs); 284 WREG32(RADEON_GEN_INT_STATUS, irqs);
@@ -187,6 +289,7 @@ static inline uint32_t r100_irq_ack(struct radeon_device *rdev)
187int r100_irq_process(struct radeon_device *rdev) 289int r100_irq_process(struct radeon_device *rdev)
188{ 290{
189 uint32_t status, msi_rearm; 291 uint32_t status, msi_rearm;
292 bool queue_hotplug = false;
190 293
191 status = r100_irq_ack(rdev); 294 status = r100_irq_ack(rdev);
192 if (!status) { 295 if (!status) {
@@ -207,8 +310,18 @@ int r100_irq_process(struct radeon_device *rdev)
207 if (status & RADEON_CRTC2_VBLANK_STAT) { 310 if (status & RADEON_CRTC2_VBLANK_STAT) {
208 drm_handle_vblank(rdev->ddev, 1); 311 drm_handle_vblank(rdev->ddev, 1);
209 } 312 }
313 if (status & RADEON_FP_DETECT_STAT) {
314 queue_hotplug = true;
315 DRM_DEBUG("HPD1\n");
316 }
317 if (status & RADEON_FP2_DETECT_STAT) {
318 queue_hotplug = true;
319 DRM_DEBUG("HPD2\n");
320 }
210 status = r100_irq_ack(rdev); 321 status = r100_irq_ack(rdev);
211 } 322 }
323 if (queue_hotplug)
324 queue_work(rdev->wq, &rdev->hotplug_work);
212 if (rdev->msi_enabled) { 325 if (rdev->msi_enabled) {
213 switch (rdev->family) { 326 switch (rdev->family) {
214 case CHIP_RS400: 327 case CHIP_RS400:
@@ -255,24 +368,27 @@ int r100_wb_init(struct radeon_device *rdev)
255 int r; 368 int r;
256 369
257 if (rdev->wb.wb_obj == NULL) { 370 if (rdev->wb.wb_obj == NULL) {
258 r = radeon_object_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, 371 r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
259 true, 372 RADEON_GEM_DOMAIN_GTT,
260 RADEON_GEM_DOMAIN_GTT, 373 &rdev->wb.wb_obj);
261 false, &rdev->wb.wb_obj);
262 if (r) { 374 if (r) {
263 DRM_ERROR("radeon: failed to create WB buffer (%d).\n", r); 375 dev_err(rdev->dev, "(%d) create WB buffer failed\n", r);
264 return r; 376 return r;
265 } 377 }
266 r = radeon_object_pin(rdev->wb.wb_obj, 378 r = radeon_bo_reserve(rdev->wb.wb_obj, false);
267 RADEON_GEM_DOMAIN_GTT, 379 if (unlikely(r != 0))
268 &rdev->wb.gpu_addr); 380 return r;
381 r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
382 &rdev->wb.gpu_addr);
269 if (r) { 383 if (r) {
270 DRM_ERROR("radeon: failed to pin WB buffer (%d).\n", r); 384 dev_err(rdev->dev, "(%d) pin WB buffer failed\n", r);
385 radeon_bo_unreserve(rdev->wb.wb_obj);
271 return r; 386 return r;
272 } 387 }
273 r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb); 388 r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
389 radeon_bo_unreserve(rdev->wb.wb_obj);
274 if (r) { 390 if (r) {
275 DRM_ERROR("radeon: failed to map WB buffer (%d).\n", r); 391 dev_err(rdev->dev, "(%d) map WB buffer failed\n", r);
276 return r; 392 return r;
277 } 393 }
278 } 394 }
@@ -290,11 +406,19 @@ void r100_wb_disable(struct radeon_device *rdev)
290 406
291void r100_wb_fini(struct radeon_device *rdev) 407void r100_wb_fini(struct radeon_device *rdev)
292{ 408{
409 int r;
410
293 r100_wb_disable(rdev); 411 r100_wb_disable(rdev);
294 if (rdev->wb.wb_obj) { 412 if (rdev->wb.wb_obj) {
295 radeon_object_kunmap(rdev->wb.wb_obj); 413 r = radeon_bo_reserve(rdev->wb.wb_obj, false);
296 radeon_object_unpin(rdev->wb.wb_obj); 414 if (unlikely(r != 0)) {
297 radeon_object_unref(&rdev->wb.wb_obj); 415 dev_err(rdev->dev, "(%d) can't finish WB\n", r);
416 return;
417 }
418 radeon_bo_kunmap(rdev->wb.wb_obj);
419 radeon_bo_unpin(rdev->wb.wb_obj);
420 radeon_bo_unreserve(rdev->wb.wb_obj);
421 radeon_bo_unref(&rdev->wb.wb_obj);
298 rdev->wb.wb = NULL; 422 rdev->wb.wb = NULL;
299 rdev->wb.wb_obj = NULL; 423 rdev->wb.wb_obj = NULL;
300 } 424 }
@@ -1288,17 +1412,17 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1288 1412
1289int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p, 1413int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
1290 struct radeon_cs_packet *pkt, 1414 struct radeon_cs_packet *pkt,
1291 struct radeon_object *robj) 1415 struct radeon_bo *robj)
1292{ 1416{
1293 unsigned idx; 1417 unsigned idx;
1294 u32 value; 1418 u32 value;
1295 idx = pkt->idx + 1; 1419 idx = pkt->idx + 1;
1296 value = radeon_get_ib_value(p, idx + 2); 1420 value = radeon_get_ib_value(p, idx + 2);
1297 if ((value + 1) > radeon_object_size(robj)) { 1421 if ((value + 1) > radeon_bo_size(robj)) {
1298 DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER " 1422 DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER "
1299 "(need %u have %lu) !\n", 1423 "(need %u have %lu) !\n",
1300 value + 1, 1424 value + 1,
1301 radeon_object_size(robj)); 1425 radeon_bo_size(robj));
1302 return -EINVAL; 1426 return -EINVAL;
1303 } 1427 }
1304 return 0; 1428 return 0;
@@ -1583,6 +1707,14 @@ void r100_gpu_init(struct radeon_device *rdev)
1583 r100_hdp_reset(rdev); 1707 r100_hdp_reset(rdev);
1584} 1708}
1585 1709
1710void r100_hdp_flush(struct radeon_device *rdev)
1711{
1712 u32 tmp;
1713 tmp = RREG32(RADEON_HOST_PATH_CNTL);
1714 tmp |= RADEON_HDP_READ_BUFFER_INVALIDATE;
1715 WREG32(RADEON_HOST_PATH_CNTL, tmp);
1716}
1717
1586void r100_hdp_reset(struct radeon_device *rdev) 1718void r100_hdp_reset(struct radeon_device *rdev)
1587{ 1719{
1588 uint32_t tmp; 1720 uint32_t tmp;
@@ -1650,6 +1782,17 @@ int r100_gpu_reset(struct radeon_device *rdev)
1650 return 0; 1782 return 0;
1651} 1783}
1652 1784
1785void r100_set_common_regs(struct radeon_device *rdev)
1786{
1787 /* set these so they don't interfere with anything */
1788 WREG32(RADEON_OV0_SCALE_CNTL, 0);
1789 WREG32(RADEON_SUBPIC_CNTL, 0);
1790 WREG32(RADEON_VIPH_CONTROL, 0);
1791 WREG32(RADEON_I2C_CNTL_1, 0);
1792 WREG32(RADEON_DVI_I2C_CNTL_1, 0);
1793 WREG32(RADEON_CAP0_TRIG_CNTL, 0);
1794 WREG32(RADEON_CAP1_TRIG_CNTL, 0);
1795}
1653 1796
1654/* 1797/*
1655 * VRAM info 1798 * VRAM info
@@ -2594,7 +2737,7 @@ static int r100_cs_track_cube(struct radeon_device *rdev,
2594 struct r100_cs_track *track, unsigned idx) 2737 struct r100_cs_track *track, unsigned idx)
2595{ 2738{
2596 unsigned face, w, h; 2739 unsigned face, w, h;
2597 struct radeon_object *cube_robj; 2740 struct radeon_bo *cube_robj;
2598 unsigned long size; 2741 unsigned long size;
2599 2742
2600 for (face = 0; face < 5; face++) { 2743 for (face = 0; face < 5; face++) {
@@ -2607,9 +2750,9 @@ static int r100_cs_track_cube(struct radeon_device *rdev,
2607 2750
2608 size += track->textures[idx].cube_info[face].offset; 2751 size += track->textures[idx].cube_info[face].offset;
2609 2752
2610 if (size > radeon_object_size(cube_robj)) { 2753 if (size > radeon_bo_size(cube_robj)) {
2611 DRM_ERROR("Cube texture offset greater than object size %lu %lu\n", 2754 DRM_ERROR("Cube texture offset greater than object size %lu %lu\n",
2612 size, radeon_object_size(cube_robj)); 2755 size, radeon_bo_size(cube_robj));
2613 r100_cs_track_texture_print(&track->textures[idx]); 2756 r100_cs_track_texture_print(&track->textures[idx]);
2614 return -1; 2757 return -1;
2615 } 2758 }
@@ -2620,7 +2763,7 @@ static int r100_cs_track_cube(struct radeon_device *rdev,
2620static int r100_cs_track_texture_check(struct radeon_device *rdev, 2763static int r100_cs_track_texture_check(struct radeon_device *rdev,
2621 struct r100_cs_track *track) 2764 struct r100_cs_track *track)
2622{ 2765{
2623 struct radeon_object *robj; 2766 struct radeon_bo *robj;
2624 unsigned long size; 2767 unsigned long size;
2625 unsigned u, i, w, h; 2768 unsigned u, i, w, h;
2626 int ret; 2769 int ret;
@@ -2676,9 +2819,9 @@ static int r100_cs_track_texture_check(struct radeon_device *rdev,
2676 "%u\n", track->textures[u].tex_coord_type, u); 2819 "%u\n", track->textures[u].tex_coord_type, u);
2677 return -EINVAL; 2820 return -EINVAL;
2678 } 2821 }
2679 if (size > radeon_object_size(robj)) { 2822 if (size > radeon_bo_size(robj)) {
2680 DRM_ERROR("Texture of unit %u needs %lu bytes but is " 2823 DRM_ERROR("Texture of unit %u needs %lu bytes but is "
2681 "%lu\n", u, size, radeon_object_size(robj)); 2824 "%lu\n", u, size, radeon_bo_size(robj));
2682 r100_cs_track_texture_print(&track->textures[u]); 2825 r100_cs_track_texture_print(&track->textures[u]);
2683 return -EINVAL; 2826 return -EINVAL;
2684 } 2827 }
@@ -2700,10 +2843,10 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
2700 } 2843 }
2701 size = track->cb[i].pitch * track->cb[i].cpp * track->maxy; 2844 size = track->cb[i].pitch * track->cb[i].cpp * track->maxy;
2702 size += track->cb[i].offset; 2845 size += track->cb[i].offset;
2703 if (size > radeon_object_size(track->cb[i].robj)) { 2846 if (size > radeon_bo_size(track->cb[i].robj)) {
2704 DRM_ERROR("[drm] Buffer too small for color buffer %d " 2847 DRM_ERROR("[drm] Buffer too small for color buffer %d "
2705 "(need %lu have %lu) !\n", i, size, 2848 "(need %lu have %lu) !\n", i, size,
2706 radeon_object_size(track->cb[i].robj)); 2849 radeon_bo_size(track->cb[i].robj));
2707 DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n", 2850 DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n",
2708 i, track->cb[i].pitch, track->cb[i].cpp, 2851 i, track->cb[i].pitch, track->cb[i].cpp,
2709 track->cb[i].offset, track->maxy); 2852 track->cb[i].offset, track->maxy);
@@ -2717,10 +2860,10 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
2717 } 2860 }
2718 size = track->zb.pitch * track->zb.cpp * track->maxy; 2861 size = track->zb.pitch * track->zb.cpp * track->maxy;
2719 size += track->zb.offset; 2862 size += track->zb.offset;
2720 if (size > radeon_object_size(track->zb.robj)) { 2863 if (size > radeon_bo_size(track->zb.robj)) {
2721 DRM_ERROR("[drm] Buffer too small for z buffer " 2864 DRM_ERROR("[drm] Buffer too small for z buffer "
2722 "(need %lu have %lu) !\n", size, 2865 "(need %lu have %lu) !\n", size,
2723 radeon_object_size(track->zb.robj)); 2866 radeon_bo_size(track->zb.robj));
2724 DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n", 2867 DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n",
2725 track->zb.pitch, track->zb.cpp, 2868 track->zb.pitch, track->zb.cpp,
2726 track->zb.offset, track->maxy); 2869 track->zb.offset, track->maxy);
@@ -2738,11 +2881,12 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
2738 "bound\n", prim_walk, i); 2881 "bound\n", prim_walk, i);
2739 return -EINVAL; 2882 return -EINVAL;
2740 } 2883 }
2741 if (size > radeon_object_size(track->arrays[i].robj)) { 2884 if (size > radeon_bo_size(track->arrays[i].robj)) {
2742 DRM_ERROR("(PW %u) Vertex array %u need %lu dwords " 2885 dev_err(rdev->dev, "(PW %u) Vertex array %u "
2743 "have %lu dwords\n", prim_walk, i, 2886 "need %lu dwords have %lu dwords\n",
2744 size >> 2, 2887 prim_walk, i, size >> 2,
2745 radeon_object_size(track->arrays[i].robj) >> 2); 2888 radeon_bo_size(track->arrays[i].robj)
2889 >> 2);
2746 DRM_ERROR("Max indices %u\n", track->max_indx); 2890 DRM_ERROR("Max indices %u\n", track->max_indx);
2747 return -EINVAL; 2891 return -EINVAL;
2748 } 2892 }
@@ -2756,10 +2900,12 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
2756 "bound\n", prim_walk, i); 2900 "bound\n", prim_walk, i);
2757 return -EINVAL; 2901 return -EINVAL;
2758 } 2902 }
2759 if (size > radeon_object_size(track->arrays[i].robj)) { 2903 if (size > radeon_bo_size(track->arrays[i].robj)) {
2760 DRM_ERROR("(PW %u) Vertex array %u need %lu dwords " 2904 dev_err(rdev->dev, "(PW %u) Vertex array %u "
2761 "have %lu dwords\n", prim_walk, i, size >> 2, 2905 "need %lu dwords have %lu dwords\n",
2762 radeon_object_size(track->arrays[i].robj) >> 2); 2906 prim_walk, i, size >> 2,
2907 radeon_bo_size(track->arrays[i].robj)
2908 >> 2);
2763 return -EINVAL; 2909 return -EINVAL;
2764 } 2910 }
2765 } 2911 }
@@ -3101,6 +3247,9 @@ static int r100_startup(struct radeon_device *rdev)
3101{ 3247{
3102 int r; 3248 int r;
3103 3249
3250 /* set common regs */
3251 r100_set_common_regs(rdev);
3252 /* program mc */
3104 r100_mc_program(rdev); 3253 r100_mc_program(rdev);
3105 /* Resume clock */ 3254 /* Resume clock */
3106 r100_clock_startup(rdev); 3255 r100_clock_startup(rdev);
@@ -3108,13 +3257,13 @@ static int r100_startup(struct radeon_device *rdev)
3108 r100_gpu_init(rdev); 3257 r100_gpu_init(rdev);
3109 /* Initialize GART (initialize after TTM so we can allocate 3258 /* Initialize GART (initialize after TTM so we can allocate
3110 * memory through TTM but finalize after TTM) */ 3259 * memory through TTM but finalize after TTM) */
3260 r100_enable_bm(rdev);
3111 if (rdev->flags & RADEON_IS_PCI) { 3261 if (rdev->flags & RADEON_IS_PCI) {
3112 r = r100_pci_gart_enable(rdev); 3262 r = r100_pci_gart_enable(rdev);
3113 if (r) 3263 if (r)
3114 return r; 3264 return r;
3115 } 3265 }
3116 /* Enable IRQ */ 3266 /* Enable IRQ */
3117 rdev->irq.sw_int = true;
3118 r100_irq_set(rdev); 3267 r100_irq_set(rdev);
3119 /* 1M ring buffer */ 3268 /* 1M ring buffer */
3120 r = r100_cp_init(rdev, 1024 * 1024); 3269 r = r100_cp_init(rdev, 1024 * 1024);
@@ -3150,6 +3299,8 @@ int r100_resume(struct radeon_device *rdev)
3150 radeon_combios_asic_init(rdev->ddev); 3299 radeon_combios_asic_init(rdev->ddev);
3151 /* Resume clock after posting */ 3300 /* Resume clock after posting */
3152 r100_clock_startup(rdev); 3301 r100_clock_startup(rdev);
3302 /* Initialize surface registers */
3303 radeon_surface_init(rdev);
3153 return r100_startup(rdev); 3304 return r100_startup(rdev);
3154} 3305}
3155 3306
@@ -3174,7 +3325,7 @@ void r100_fini(struct radeon_device *rdev)
3174 r100_pci_gart_fini(rdev); 3325 r100_pci_gart_fini(rdev);
3175 radeon_irq_kms_fini(rdev); 3326 radeon_irq_kms_fini(rdev);
3176 radeon_fence_driver_fini(rdev); 3327 radeon_fence_driver_fini(rdev);
3177 radeon_object_fini(rdev); 3328 radeon_bo_fini(rdev);
3178 radeon_atombios_fini(rdev); 3329 radeon_atombios_fini(rdev);
3179 kfree(rdev->bios); 3330 kfree(rdev->bios);
3180 rdev->bios = NULL; 3331 rdev->bios = NULL;
@@ -3242,10 +3393,8 @@ int r100_init(struct radeon_device *rdev)
3242 RREG32(R_0007C0_CP_STAT)); 3393 RREG32(R_0007C0_CP_STAT));
3243 } 3394 }
3244 /* check if cards are posted or not */ 3395 /* check if cards are posted or not */
3245 if (!radeon_card_posted(rdev) && rdev->bios) { 3396 if (radeon_boot_test_post_card(rdev) == false)
3246 DRM_INFO("GPU not posted. posting now...\n"); 3397 return -EINVAL;
3247 radeon_combios_asic_init(rdev->ddev);
3248 }
3249 /* Set asic errata */ 3398 /* Set asic errata */
3250 r100_errata(rdev); 3399 r100_errata(rdev);
3251 /* Initialize clocks */ 3400 /* Initialize clocks */
@@ -3264,7 +3413,7 @@ int r100_init(struct radeon_device *rdev)
3264 if (r) 3413 if (r)
3265 return r; 3414 return r;
3266 /* Memory manager */ 3415 /* Memory manager */
3267 r = radeon_object_init(rdev); 3416 r = radeon_bo_init(rdev);
3268 if (r) 3417 if (r)
3269 return r; 3418 return r;
3270 if (rdev->flags & RADEON_IS_PCI) { 3419 if (rdev->flags & RADEON_IS_PCI) {
diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h
index 0daf0d76a891..ca50903dd2bb 100644
--- a/drivers/gpu/drm/radeon/r100_track.h
+++ b/drivers/gpu/drm/radeon/r100_track.h
@@ -10,26 +10,26 @@
10 * CS functions 10 * CS functions
11 */ 11 */
12struct r100_cs_track_cb { 12struct r100_cs_track_cb {
13 struct radeon_object *robj; 13 struct radeon_bo *robj;
14 unsigned pitch; 14 unsigned pitch;
15 unsigned cpp; 15 unsigned cpp;
16 unsigned offset; 16 unsigned offset;
17}; 17};
18 18
19struct r100_cs_track_array { 19struct r100_cs_track_array {
20 struct radeon_object *robj; 20 struct radeon_bo *robj;
21 unsigned esize; 21 unsigned esize;
22}; 22};
23 23
24struct r100_cs_cube_info { 24struct r100_cs_cube_info {
25 struct radeon_object *robj; 25 struct radeon_bo *robj;
26 unsigned offset; 26 unsigned offset;
27 unsigned width; 27 unsigned width;
28 unsigned height; 28 unsigned height;
29}; 29};
30 30
31struct r100_cs_track_texture { 31struct r100_cs_track_texture {
32 struct radeon_object *robj; 32 struct radeon_bo *robj;
33 struct r100_cs_cube_info cube_info[5]; /* info for 5 non-primary faces */ 33 struct r100_cs_cube_info cube_info[5]; /* info for 5 non-primary faces */
34 unsigned pitch; 34 unsigned pitch;
35 unsigned width; 35 unsigned width;
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 2f43ee8e4048..83378c39d0e3 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -137,14 +137,19 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev)
137 137
138void rv370_pcie_gart_disable(struct radeon_device *rdev) 138void rv370_pcie_gart_disable(struct radeon_device *rdev)
139{ 139{
140 uint32_t tmp; 140 u32 tmp;
141 int r;
141 142
142 tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL); 143 tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
143 tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD; 144 tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
144 WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN); 145 WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
145 if (rdev->gart.table.vram.robj) { 146 if (rdev->gart.table.vram.robj) {
146 radeon_object_kunmap(rdev->gart.table.vram.robj); 147 r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
147 radeon_object_unpin(rdev->gart.table.vram.robj); 148 if (likely(r == 0)) {
149 radeon_bo_kunmap(rdev->gart.table.vram.robj);
150 radeon_bo_unpin(rdev->gart.table.vram.robj);
151 radeon_bo_unreserve(rdev->gart.table.vram.robj);
152 }
148 } 153 }
149} 154}
150 155
@@ -1181,6 +1186,9 @@ static int r300_startup(struct radeon_device *rdev)
1181{ 1186{
1182 int r; 1187 int r;
1183 1188
1189 /* set common regs */
1190 r100_set_common_regs(rdev);
1191 /* program mc */
1184 r300_mc_program(rdev); 1192 r300_mc_program(rdev);
1185 /* Resume clock */ 1193 /* Resume clock */
1186 r300_clock_startup(rdev); 1194 r300_clock_startup(rdev);
@@ -1193,13 +1201,18 @@ static int r300_startup(struct radeon_device *rdev)
1193 if (r) 1201 if (r)
1194 return r; 1202 return r;
1195 } 1203 }
1204
1205 if (rdev->family == CHIP_R300 ||
1206 rdev->family == CHIP_R350 ||
1207 rdev->family == CHIP_RV350)
1208 r100_enable_bm(rdev);
1209
1196 if (rdev->flags & RADEON_IS_PCI) { 1210 if (rdev->flags & RADEON_IS_PCI) {
1197 r = r100_pci_gart_enable(rdev); 1211 r = r100_pci_gart_enable(rdev);
1198 if (r) 1212 if (r)
1199 return r; 1213 return r;
1200 } 1214 }
1201 /* Enable IRQ */ 1215 /* Enable IRQ */
1202 rdev->irq.sw_int = true;
1203 r100_irq_set(rdev); 1216 r100_irq_set(rdev);
1204 /* 1M ring buffer */ 1217 /* 1M ring buffer */
1205 r = r100_cp_init(rdev, 1024 * 1024); 1218 r = r100_cp_init(rdev, 1024 * 1024);
@@ -1237,6 +1250,8 @@ int r300_resume(struct radeon_device *rdev)
1237 radeon_combios_asic_init(rdev->ddev); 1250 radeon_combios_asic_init(rdev->ddev);
1238 /* Resume clock after posting */ 1251 /* Resume clock after posting */
1239 r300_clock_startup(rdev); 1252 r300_clock_startup(rdev);
1253 /* Initialize surface registers */
1254 radeon_surface_init(rdev);
1240 return r300_startup(rdev); 1255 return r300_startup(rdev);
1241} 1256}
1242 1257
@@ -1265,7 +1280,7 @@ void r300_fini(struct radeon_device *rdev)
1265 r100_pci_gart_fini(rdev); 1280 r100_pci_gart_fini(rdev);
1266 radeon_irq_kms_fini(rdev); 1281 radeon_irq_kms_fini(rdev);
1267 radeon_fence_driver_fini(rdev); 1282 radeon_fence_driver_fini(rdev);
1268 radeon_object_fini(rdev); 1283 radeon_bo_fini(rdev);
1269 radeon_atombios_fini(rdev); 1284 radeon_atombios_fini(rdev);
1270 kfree(rdev->bios); 1285 kfree(rdev->bios);
1271 rdev->bios = NULL; 1286 rdev->bios = NULL;
@@ -1303,10 +1318,8 @@ int r300_init(struct radeon_device *rdev)
1303 RREG32(R_0007C0_CP_STAT)); 1318 RREG32(R_0007C0_CP_STAT));
1304 } 1319 }
1305 /* check if cards are posted or not */ 1320 /* check if cards are posted or not */
1306 if (!radeon_card_posted(rdev) && rdev->bios) { 1321 if (radeon_boot_test_post_card(rdev) == false)
1307 DRM_INFO("GPU not posted. posting now...\n"); 1322 return -EINVAL;
1308 radeon_combios_asic_init(rdev->ddev);
1309 }
1310 /* Set asic errata */ 1323 /* Set asic errata */
1311 r300_errata(rdev); 1324 r300_errata(rdev);
1312 /* Initialize clocks */ 1325 /* Initialize clocks */
@@ -1325,7 +1338,7 @@ int r300_init(struct radeon_device *rdev)
1325 if (r) 1338 if (r)
1326 return r; 1339 return r;
1327 /* Memory manager */ 1340 /* Memory manager */
1328 r = radeon_object_init(rdev); 1341 r = radeon_bo_init(rdev);
1329 if (r) 1342 if (r)
1330 return r; 1343 return r;
1331 if (rdev->flags & RADEON_IS_PCIE) { 1344 if (rdev->flags & RADEON_IS_PCIE) {
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 1cefdbcc0850..c05a7270cf0c 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -169,6 +169,9 @@ static int r420_startup(struct radeon_device *rdev)
169{ 169{
170 int r; 170 int r;
171 171
172 /* set common regs */
173 r100_set_common_regs(rdev);
174 /* program mc */
172 r300_mc_program(rdev); 175 r300_mc_program(rdev);
173 /* Resume clock */ 176 /* Resume clock */
174 r420_clock_resume(rdev); 177 r420_clock_resume(rdev);
@@ -186,7 +189,6 @@ static int r420_startup(struct radeon_device *rdev)
186 } 189 }
187 r420_pipes_init(rdev); 190 r420_pipes_init(rdev);
188 /* Enable IRQ */ 191 /* Enable IRQ */
189 rdev->irq.sw_int = true;
190 r100_irq_set(rdev); 192 r100_irq_set(rdev);
191 /* 1M ring buffer */ 193 /* 1M ring buffer */
192 r = r100_cp_init(rdev, 1024 * 1024); 194 r = r100_cp_init(rdev, 1024 * 1024);
@@ -229,7 +231,8 @@ int r420_resume(struct radeon_device *rdev)
229 } 231 }
230 /* Resume clock after posting */ 232 /* Resume clock after posting */
231 r420_clock_resume(rdev); 233 r420_clock_resume(rdev);
232 234 /* Initialize surface registers */
235 radeon_surface_init(rdev);
233 return r420_startup(rdev); 236 return r420_startup(rdev);
234} 237}
235 238
@@ -258,7 +261,7 @@ void r420_fini(struct radeon_device *rdev)
258 radeon_agp_fini(rdev); 261 radeon_agp_fini(rdev);
259 radeon_irq_kms_fini(rdev); 262 radeon_irq_kms_fini(rdev);
260 radeon_fence_driver_fini(rdev); 263 radeon_fence_driver_fini(rdev);
261 radeon_object_fini(rdev); 264 radeon_bo_fini(rdev);
262 if (rdev->is_atom_bios) { 265 if (rdev->is_atom_bios) {
263 radeon_atombios_fini(rdev); 266 radeon_atombios_fini(rdev);
264 } else { 267 } else {
@@ -301,14 +304,9 @@ int r420_init(struct radeon_device *rdev)
301 RREG32(R_0007C0_CP_STAT)); 304 RREG32(R_0007C0_CP_STAT));
302 } 305 }
303 /* check if cards are posted or not */ 306 /* check if cards are posted or not */
304 if (!radeon_card_posted(rdev) && rdev->bios) { 307 if (radeon_boot_test_post_card(rdev) == false)
305 DRM_INFO("GPU not posted. posting now...\n"); 308 return -EINVAL;
306 if (rdev->is_atom_bios) { 309
307 atom_asic_init(rdev->mode_info.atom_context);
308 } else {
309 radeon_combios_asic_init(rdev->ddev);
310 }
311 }
312 /* Initialize clocks */ 310 /* Initialize clocks */
313 radeon_get_clock_info(rdev->ddev); 311 radeon_get_clock_info(rdev->ddev);
314 /* Initialize power management */ 312 /* Initialize power management */
@@ -331,10 +329,13 @@ int r420_init(struct radeon_device *rdev)
331 return r; 329 return r;
332 } 330 }
333 /* Memory manager */ 331 /* Memory manager */
334 r = radeon_object_init(rdev); 332 r = radeon_bo_init(rdev);
335 if (r) { 333 if (r) {
336 return r; 334 return r;
337 } 335 }
336 if (rdev->family == CHIP_R420)
337 r100_enable_bm(rdev);
338
338 if (rdev->flags & RADEON_IS_PCIE) { 339 if (rdev->flags & RADEON_IS_PCIE) {
339 r = rv370_pcie_gart_init(rdev); 340 r = rv370_pcie_gart_init(rdev);
340 if (r) 341 if (r)
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
index 7baa73955563..74ad89bdf2b5 100644
--- a/drivers/gpu/drm/radeon/r500_reg.h
+++ b/drivers/gpu/drm/radeon/r500_reg.h
@@ -716,6 +716,8 @@
716 716
717#define AVIVO_DVOA_BIT_DEPTH_CONTROL 0x7988 717#define AVIVO_DVOA_BIT_DEPTH_CONTROL 0x7988
718 718
719#define AVIVO_DC_GPIO_HPD_A 0x7e94
720
719#define AVIVO_GPIO_0 0x7e30 721#define AVIVO_GPIO_0 0x7e30
720#define AVIVO_GPIO_1 0x7e40 722#define AVIVO_GPIO_1 0x7e40
721#define AVIVO_GPIO_2 0x7e50 723#define AVIVO_GPIO_2 0x7e50
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index f7435185c0a6..0f3843b6dac7 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -185,7 +185,6 @@ static int r520_startup(struct radeon_device *rdev)
185 return r; 185 return r;
186 } 186 }
187 /* Enable IRQ */ 187 /* Enable IRQ */
188 rdev->irq.sw_int = true;
189 rs600_irq_set(rdev); 188 rs600_irq_set(rdev);
190 /* 1M ring buffer */ 189 /* 1M ring buffer */
191 r = r100_cp_init(rdev, 1024 * 1024); 190 r = r100_cp_init(rdev, 1024 * 1024);
@@ -221,6 +220,8 @@ int r520_resume(struct radeon_device *rdev)
221 atom_asic_init(rdev->mode_info.atom_context); 220 atom_asic_init(rdev->mode_info.atom_context);
222 /* Resume clock after posting */ 221 /* Resume clock after posting */
223 rv515_clock_startup(rdev); 222 rv515_clock_startup(rdev);
223 /* Initialize surface registers */
224 radeon_surface_init(rdev);
224 return r520_startup(rdev); 225 return r520_startup(rdev);
225} 226}
226 227
@@ -254,6 +255,9 @@ int r520_init(struct radeon_device *rdev)
254 RREG32(R_0007C0_CP_STAT)); 255 RREG32(R_0007C0_CP_STAT));
255 } 256 }
256 /* check if cards are posted or not */ 257 /* check if cards are posted or not */
258 if (radeon_boot_test_post_card(rdev) == false)
259 return -EINVAL;
260
257 if (!radeon_card_posted(rdev) && rdev->bios) { 261 if (!radeon_card_posted(rdev) && rdev->bios) {
258 DRM_INFO("GPU not posted. posting now...\n"); 262 DRM_INFO("GPU not posted. posting now...\n");
259 atom_asic_init(rdev->mode_info.atom_context); 263 atom_asic_init(rdev->mode_info.atom_context);
@@ -277,7 +281,7 @@ int r520_init(struct radeon_device *rdev)
277 if (r) 281 if (r)
278 return r; 282 return r;
279 /* Memory manager */ 283 /* Memory manager */
280 r = radeon_object_init(rdev); 284 r = radeon_bo_init(rdev);
281 if (r) 285 if (r)
282 return r; 286 return r;
283 r = rv370_pcie_gart_init(rdev); 287 r = rv370_pcie_gart_init(rdev);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 6740ed24358f..36656bd110bf 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -38,8 +38,10 @@
38 38
39#define PFP_UCODE_SIZE 576 39#define PFP_UCODE_SIZE 576
40#define PM4_UCODE_SIZE 1792 40#define PM4_UCODE_SIZE 1792
41#define RLC_UCODE_SIZE 768
41#define R700_PFP_UCODE_SIZE 848 42#define R700_PFP_UCODE_SIZE 848
42#define R700_PM4_UCODE_SIZE 1360 43#define R700_PM4_UCODE_SIZE 1360
44#define R700_RLC_UCODE_SIZE 1024
43 45
44/* Firmware Names */ 46/* Firmware Names */
45MODULE_FIRMWARE("radeon/R600_pfp.bin"); 47MODULE_FIRMWARE("radeon/R600_pfp.bin");
@@ -62,6 +64,8 @@ MODULE_FIRMWARE("radeon/RV730_pfp.bin");
62MODULE_FIRMWARE("radeon/RV730_me.bin"); 64MODULE_FIRMWARE("radeon/RV730_me.bin");
63MODULE_FIRMWARE("radeon/RV710_pfp.bin"); 65MODULE_FIRMWARE("radeon/RV710_pfp.bin");
64MODULE_FIRMWARE("radeon/RV710_me.bin"); 66MODULE_FIRMWARE("radeon/RV710_me.bin");
67MODULE_FIRMWARE("radeon/R600_rlc.bin");
68MODULE_FIRMWARE("radeon/R700_rlc.bin");
65 69
66int r600_debugfs_mc_info_init(struct radeon_device *rdev); 70int r600_debugfs_mc_info_init(struct radeon_device *rdev);
67 71
@@ -70,6 +74,281 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev);
70void r600_gpu_init(struct radeon_device *rdev); 74void r600_gpu_init(struct radeon_device *rdev);
71void r600_fini(struct radeon_device *rdev); 75void r600_fini(struct radeon_device *rdev);
72 76
77/* hpd for digital panel detect/disconnect */
78bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
79{
80 bool connected = false;
81
82 if (ASIC_IS_DCE3(rdev)) {
83 switch (hpd) {
84 case RADEON_HPD_1:
85 if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
86 connected = true;
87 break;
88 case RADEON_HPD_2:
89 if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
90 connected = true;
91 break;
92 case RADEON_HPD_3:
93 if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
94 connected = true;
95 break;
96 case RADEON_HPD_4:
97 if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
98 connected = true;
99 break;
100 /* DCE 3.2 */
101 case RADEON_HPD_5:
102 if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
103 connected = true;
104 break;
105 case RADEON_HPD_6:
106 if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
107 connected = true;
108 break;
109 default:
110 break;
111 }
112 } else {
113 switch (hpd) {
114 case RADEON_HPD_1:
115 if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
116 connected = true;
117 break;
118 case RADEON_HPD_2:
119 if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
120 connected = true;
121 break;
122 case RADEON_HPD_3:
123 if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
124 connected = true;
125 break;
126 default:
127 break;
128 }
129 }
130 return connected;
131}
132
133void r600_hpd_set_polarity(struct radeon_device *rdev,
134 enum radeon_hpd_id hpd)
135{
136 u32 tmp;
137 bool connected = r600_hpd_sense(rdev, hpd);
138
139 if (ASIC_IS_DCE3(rdev)) {
140 switch (hpd) {
141 case RADEON_HPD_1:
142 tmp = RREG32(DC_HPD1_INT_CONTROL);
143 if (connected)
144 tmp &= ~DC_HPDx_INT_POLARITY;
145 else
146 tmp |= DC_HPDx_INT_POLARITY;
147 WREG32(DC_HPD1_INT_CONTROL, tmp);
148 break;
149 case RADEON_HPD_2:
150 tmp = RREG32(DC_HPD2_INT_CONTROL);
151 if (connected)
152 tmp &= ~DC_HPDx_INT_POLARITY;
153 else
154 tmp |= DC_HPDx_INT_POLARITY;
155 WREG32(DC_HPD2_INT_CONTROL, tmp);
156 break;
157 case RADEON_HPD_3:
158 tmp = RREG32(DC_HPD3_INT_CONTROL);
159 if (connected)
160 tmp &= ~DC_HPDx_INT_POLARITY;
161 else
162 tmp |= DC_HPDx_INT_POLARITY;
163 WREG32(DC_HPD3_INT_CONTROL, tmp);
164 break;
165 case RADEON_HPD_4:
166 tmp = RREG32(DC_HPD4_INT_CONTROL);
167 if (connected)
168 tmp &= ~DC_HPDx_INT_POLARITY;
169 else
170 tmp |= DC_HPDx_INT_POLARITY;
171 WREG32(DC_HPD4_INT_CONTROL, tmp);
172 break;
173 case RADEON_HPD_5:
174 tmp = RREG32(DC_HPD5_INT_CONTROL);
175 if (connected)
176 tmp &= ~DC_HPDx_INT_POLARITY;
177 else
178 tmp |= DC_HPDx_INT_POLARITY;
179 WREG32(DC_HPD5_INT_CONTROL, tmp);
180 break;
181 /* DCE 3.2 */
182 case RADEON_HPD_6:
183 tmp = RREG32(DC_HPD6_INT_CONTROL);
184 if (connected)
185 tmp &= ~DC_HPDx_INT_POLARITY;
186 else
187 tmp |= DC_HPDx_INT_POLARITY;
188 WREG32(DC_HPD6_INT_CONTROL, tmp);
189 break;
190 default:
191 break;
192 }
193 } else {
194 switch (hpd) {
195 case RADEON_HPD_1:
196 tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
197 if (connected)
198 tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
199 else
200 tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
201 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
202 break;
203 case RADEON_HPD_2:
204 tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
205 if (connected)
206 tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
207 else
208 tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
209 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
210 break;
211 case RADEON_HPD_3:
212 tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
213 if (connected)
214 tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
215 else
216 tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
217 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
218 break;
219 default:
220 break;
221 }
222 }
223}
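
The DCE3 branch above is six copies of the same read-modify-write; since only the register offset varies per pad, a table-driven form can collapse it. A sketch, not the driver's actual code: it assumes RADEON_HPD_1..RADEON_HPD_6 are consecutive enum values and covers only the DCE3 path.

static const u32 hpd_int_control_sketch[] = {
	DC_HPD1_INT_CONTROL, DC_HPD2_INT_CONTROL, DC_HPD3_INT_CONTROL,
	DC_HPD4_INT_CONTROL, DC_HPD5_INT_CONTROL, DC_HPD6_INT_CONTROL,
};

static void r600_hpd_set_polarity_sketch(struct radeon_device *rdev,
					 enum radeon_hpd_id hpd)
{
	unsigned int idx = hpd - RADEON_HPD_1;  /* assumes consecutive ids */
	u32 tmp;

	if (idx >= ARRAY_SIZE(hpd_int_control_sketch))
		return;
	tmp = RREG32(hpd_int_control_sketch[idx]);
	if (r600_hpd_sense(rdev, hpd))
		tmp &= ~DC_HPDx_INT_POLARITY;   /* connected: interrupt on unplug */
	else
		tmp |= DC_HPDx_INT_POLARITY;    /* disconnected: interrupt on plug */
	WREG32(hpd_int_control_sketch[idx], tmp);
}
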
224
225void r600_hpd_init(struct radeon_device *rdev)
226{
227 struct drm_device *dev = rdev->ddev;
228 struct drm_connector *connector;
229
230 if (ASIC_IS_DCE3(rdev)) {
231 u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
232 if (ASIC_IS_DCE32(rdev))
233 tmp |= DC_HPDx_EN;
234
235 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
236 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
237 switch (radeon_connector->hpd.hpd) {
238 case RADEON_HPD_1:
239 WREG32(DC_HPD1_CONTROL, tmp);
240 rdev->irq.hpd[0] = true;
241 break;
242 case RADEON_HPD_2:
243 WREG32(DC_HPD2_CONTROL, tmp);
244 rdev->irq.hpd[1] = true;
245 break;
246 case RADEON_HPD_3:
247 WREG32(DC_HPD3_CONTROL, tmp);
248 rdev->irq.hpd[2] = true;
249 break;
250 case RADEON_HPD_4:
251 WREG32(DC_HPD4_CONTROL, tmp);
252 rdev->irq.hpd[3] = true;
253 break;
254 /* DCE 3.2 */
255 case RADEON_HPD_5:
256 WREG32(DC_HPD5_CONTROL, tmp);
257 rdev->irq.hpd[4] = true;
258 break;
259 case RADEON_HPD_6:
260 WREG32(DC_HPD6_CONTROL, tmp);
261 rdev->irq.hpd[5] = true;
262 break;
263 default:
264 break;
265 }
266 }
267 } else {
268 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
269 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
270 switch (radeon_connector->hpd.hpd) {
271 case RADEON_HPD_1:
272 WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
273 rdev->irq.hpd[0] = true;
274 break;
275 case RADEON_HPD_2:
276 WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
277 rdev->irq.hpd[1] = true;
278 break;
279 case RADEON_HPD_3:
280 WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
281 rdev->irq.hpd[2] = true;
282 break;
283 default:
284 break;
285 }
286 }
287 }
288 r600_irq_set(rdev);
289}
290
291void r600_hpd_fini(struct radeon_device *rdev)
292{
293 struct drm_device *dev = rdev->ddev;
294 struct drm_connector *connector;
295
296 if (ASIC_IS_DCE3(rdev)) {
297 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
298 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
299 switch (radeon_connector->hpd.hpd) {
300 case RADEON_HPD_1:
301 WREG32(DC_HPD1_CONTROL, 0);
302 rdev->irq.hpd[0] = false;
303 break;
304 case RADEON_HPD_2:
305 WREG32(DC_HPD2_CONTROL, 0);
306 rdev->irq.hpd[1] = false;
307 break;
308 case RADEON_HPD_3:
309 WREG32(DC_HPD3_CONTROL, 0);
310 rdev->irq.hpd[2] = false;
311 break;
312 case RADEON_HPD_4:
313 WREG32(DC_HPD4_CONTROL, 0);
314 rdev->irq.hpd[3] = false;
315 break;
316 /* DCE 3.2 */
317 case RADEON_HPD_5:
318 WREG32(DC_HPD5_CONTROL, 0);
319 rdev->irq.hpd[4] = false;
320 break;
321 case RADEON_HPD_6:
322 WREG32(DC_HPD6_CONTROL, 0);
323 rdev->irq.hpd[5] = false;
324 break;
325 default:
326 break;
327 }
328 }
329 } else {
330 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
331 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
332 switch (radeon_connector->hpd.hpd) {
333 case RADEON_HPD_1:
334 WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
335 rdev->irq.hpd[0] = false;
336 break;
337 case RADEON_HPD_2:
338 WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
339 rdev->irq.hpd[1] = false;
340 break;
341 case RADEON_HPD_3:
342 WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
343 rdev->irq.hpd[2] = false;
344 break;
345 default:
346 break;
347 }
348 }
349 }
350}
351
73/* 352/*
74 * R600 PCIE GART 353 * R600 PCIE GART
75 */ 354 */
@@ -180,7 +459,7 @@ int r600_pcie_gart_enable(struct radeon_device *rdev)
180void r600_pcie_gart_disable(struct radeon_device *rdev) 459void r600_pcie_gart_disable(struct radeon_device *rdev)
181{ 460{
182 u32 tmp; 461 u32 tmp;
183 int i; 462 int i, r;
184 463
185 /* Disable all tables */ 464 /* Disable all tables */
186 for (i = 0; i < 7; i++) 465 for (i = 0; i < 7; i++)
@@ -208,8 +487,12 @@ void r600_pcie_gart_disable(struct radeon_device *rdev)
208 WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp); 487 WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
209 WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp); 488 WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
210 if (rdev->gart.table.vram.robj) { 489 if (rdev->gart.table.vram.robj) {
211 radeon_object_kunmap(rdev->gart.table.vram.robj); 490 r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
212 radeon_object_unpin(rdev->gart.table.vram.robj); 491 if (likely(r == 0)) {
492 radeon_bo_kunmap(rdev->gart.table.vram.robj);
493 radeon_bo_unpin(rdev->gart.table.vram.robj);
494 radeon_bo_unreserve(rdev->gart.table.vram.robj);
495 }
213 } 496 }
214} 497}
215 498
@@ -1101,6 +1384,10 @@ void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
1101 (void)RREG32(PCIE_PORT_DATA); 1384 (void)RREG32(PCIE_PORT_DATA);
1102} 1385}
1103 1386
1387void r600_hdp_flush(struct radeon_device *rdev)
1388{
1389 WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
1390}
1104 1391
1105/* 1392/*
1106 * CP & Ring 1393 * CP & Ring
@@ -1110,11 +1397,12 @@ void r600_cp_stop(struct radeon_device *rdev)
1110 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1)); 1397 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
1111} 1398}
1112 1399
1113int r600_cp_init_microcode(struct radeon_device *rdev) 1400int r600_init_microcode(struct radeon_device *rdev)
1114{ 1401{
1115 struct platform_device *pdev; 1402 struct platform_device *pdev;
1116 const char *chip_name; 1403 const char *chip_name;
1117 size_t pfp_req_size, me_req_size; 1404 const char *rlc_chip_name;
1405 size_t pfp_req_size, me_req_size, rlc_req_size;
1118 char fw_name[30]; 1406 char fw_name[30];
1119 int err; 1407 int err;
1120 1408
@@ -1128,30 +1416,62 @@ int r600_cp_init_microcode(struct radeon_device *rdev)
1128 } 1416 }
1129 1417
1130 switch (rdev->family) { 1418 switch (rdev->family) {
1131 case CHIP_R600: chip_name = "R600"; break; 1419 case CHIP_R600:
1132 case CHIP_RV610: chip_name = "RV610"; break; 1420 chip_name = "R600";
1133 case CHIP_RV630: chip_name = "RV630"; break; 1421 rlc_chip_name = "R600";
1134 case CHIP_RV620: chip_name = "RV620"; break; 1422 break;
1135 case CHIP_RV635: chip_name = "RV635"; break; 1423 case CHIP_RV610:
1136 case CHIP_RV670: chip_name = "RV670"; break; 1424 chip_name = "RV610";
1425 rlc_chip_name = "R600";
1426 break;
1427 case CHIP_RV630:
1428 chip_name = "RV630";
1429 rlc_chip_name = "R600";
1430 break;
1431 case CHIP_RV620:
1432 chip_name = "RV620";
1433 rlc_chip_name = "R600";
1434 break;
1435 case CHIP_RV635:
1436 chip_name = "RV635";
1437 rlc_chip_name = "R600";
1438 break;
1439 case CHIP_RV670:
1440 chip_name = "RV670";
1441 rlc_chip_name = "R600";
1442 break;
1137 case CHIP_RS780: 1443 case CHIP_RS780:
1138 case CHIP_RS880: chip_name = "RS780"; break; 1444 case CHIP_RS880:
1139 case CHIP_RV770: chip_name = "RV770"; break; 1445 chip_name = "RS780";
1446 rlc_chip_name = "R600";
1447 break;
1448 case CHIP_RV770:
1449 chip_name = "RV770";
1450 rlc_chip_name = "R700";
1451 break;
1140 case CHIP_RV730: 1452 case CHIP_RV730:
1141 case CHIP_RV740: chip_name = "RV730"; break; 1453 case CHIP_RV740:
1142 case CHIP_RV710: chip_name = "RV710"; break; 1454 chip_name = "RV730";
1455 rlc_chip_name = "R700";
1456 break;
1457 case CHIP_RV710:
1458 chip_name = "RV710";
1459 rlc_chip_name = "R700";
1460 break;
1143 default: BUG(); 1461 default: BUG();
1144 } 1462 }
1145 1463
1146 if (rdev->family >= CHIP_RV770) { 1464 if (rdev->family >= CHIP_RV770) {
1147 pfp_req_size = R700_PFP_UCODE_SIZE * 4; 1465 pfp_req_size = R700_PFP_UCODE_SIZE * 4;
1148 me_req_size = R700_PM4_UCODE_SIZE * 4; 1466 me_req_size = R700_PM4_UCODE_SIZE * 4;
1467 rlc_req_size = R700_RLC_UCODE_SIZE * 4;
1149 } else { 1468 } else {
1150 pfp_req_size = PFP_UCODE_SIZE * 4; 1469 pfp_req_size = PFP_UCODE_SIZE * 4;
1151 me_req_size = PM4_UCODE_SIZE * 12; 1470 me_req_size = PM4_UCODE_SIZE * 12;
1471 rlc_req_size = RLC_UCODE_SIZE * 4;
1152 } 1472 }
1153 1473
1154 DRM_INFO("Loading %s CP Microcode\n", chip_name); 1474 DRM_INFO("Loading %s Microcode\n", chip_name);
1155 1475
1156 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name); 1476 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
1157 err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev); 1477 err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
@@ -1175,6 +1495,18 @@ int r600_cp_init_microcode(struct radeon_device *rdev)
1175 rdev->me_fw->size, fw_name); 1495 rdev->me_fw->size, fw_name);
1176 err = -EINVAL; 1496 err = -EINVAL;
1177 } 1497 }
1498
1499 snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
1500 err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
1501 if (err)
1502 goto out;
1503 if (rdev->rlc_fw->size != rlc_req_size) {
1504 printk(KERN_ERR
1505 "r600_rlc: Bogus length %zu in firmware \"%s\"\n",
1506 rdev->rlc_fw->size, fw_name);
1507 err = -EINVAL;
1508 }
1509
1178out: 1510out:
1179 platform_device_unregister(pdev); 1511 platform_device_unregister(pdev);
1180 1512
@@ -1187,6 +1519,8 @@ out:
1187 rdev->pfp_fw = NULL; 1519 rdev->pfp_fw = NULL;
1188 release_firmware(rdev->me_fw); 1520 release_firmware(rdev->me_fw);
1189 rdev->me_fw = NULL; 1521 rdev->me_fw = NULL;
1522 release_firmware(rdev->rlc_fw);
1523 rdev->rlc_fw = NULL;
1190 } 1524 }
1191 return err; 1525 return err;
1192} 1526}
@@ -1381,10 +1715,16 @@ int r600_ring_test(struct radeon_device *rdev)
1381 1715
1382void r600_wb_disable(struct radeon_device *rdev) 1716void r600_wb_disable(struct radeon_device *rdev)
1383{ 1717{
1718 int r;
1719
1384 WREG32(SCRATCH_UMSK, 0); 1720 WREG32(SCRATCH_UMSK, 0);
1385 if (rdev->wb.wb_obj) { 1721 if (rdev->wb.wb_obj) {
1386 radeon_object_kunmap(rdev->wb.wb_obj); 1722 r = radeon_bo_reserve(rdev->wb.wb_obj, false);
1387 radeon_object_unpin(rdev->wb.wb_obj); 1723 if (unlikely(r != 0))
1724 return;
1725 radeon_bo_kunmap(rdev->wb.wb_obj);
1726 radeon_bo_unpin(rdev->wb.wb_obj);
1727 radeon_bo_unreserve(rdev->wb.wb_obj);
1388 } 1728 }
1389} 1729}
1390 1730
@@ -1392,7 +1732,7 @@ void r600_wb_fini(struct radeon_device *rdev)
1392{ 1732{
1393 r600_wb_disable(rdev); 1733 r600_wb_disable(rdev);
1394 if (rdev->wb.wb_obj) { 1734 if (rdev->wb.wb_obj) {
1395 radeon_object_unref(&rdev->wb.wb_obj); 1735 radeon_bo_unref(&rdev->wb.wb_obj);
1396 rdev->wb.wb = NULL; 1736 rdev->wb.wb = NULL;
1397 rdev->wb.wb_obj = NULL; 1737 rdev->wb.wb_obj = NULL;
1398 } 1738 }
@@ -1403,22 +1743,29 @@ int r600_wb_enable(struct radeon_device *rdev)
1403 int r; 1743 int r;
1404 1744
1405 if (rdev->wb.wb_obj == NULL) { 1745 if (rdev->wb.wb_obj == NULL) {
1406 r = radeon_object_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true, 1746 r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
1407 RADEON_GEM_DOMAIN_GTT, false, &rdev->wb.wb_obj); 1747 RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
1408 if (r) { 1748 if (r) {
1409 dev_warn(rdev->dev, "failed to create WB buffer (%d).\n", r); 1749 dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
1410 return r; 1750 return r;
1411 } 1751 }
1412 r = radeon_object_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT, 1752 r = radeon_bo_reserve(rdev->wb.wb_obj, false);
1753 if (unlikely(r != 0)) {
1754 r600_wb_fini(rdev);
1755 return r;
1756 }
1757 r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
1413 &rdev->wb.gpu_addr); 1758 &rdev->wb.gpu_addr);
1414 if (r) { 1759 if (r) {
1415 dev_warn(rdev->dev, "failed to pin WB buffer (%d).\n", r); 1760 radeon_bo_unreserve(rdev->wb.wb_obj);
1761 dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
1416 r600_wb_fini(rdev); 1762 r600_wb_fini(rdev);
1417 return r; 1763 return r;
1418 } 1764 }
1419 r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb); 1765 r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
1766 radeon_bo_unreserve(rdev->wb.wb_obj);
1420 if (r) { 1767 if (r) {
1421 dev_warn(rdev->dev, "failed to map WB buffer (%d).\n", r); 1768 dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
1422 r600_wb_fini(rdev); 1769 r600_wb_fini(rdev);
1423 return r; 1770 return r;
1424 } 1771 }
@@ -1433,10 +1780,14 @@ int r600_wb_enable(struct radeon_device *rdev)
1433void r600_fence_ring_emit(struct radeon_device *rdev, 1780void r600_fence_ring_emit(struct radeon_device *rdev,
1434 struct radeon_fence *fence) 1781 struct radeon_fence *fence)
1435{ 1782{
 1783	/* Also consider EVENT_WRITE_EOP; it handles interrupts, timestamps, and events */
1436 /* Emit fence sequence & fire IRQ */ 1784 /* Emit fence sequence & fire IRQ */
1437 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 1785 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
1438 radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2)); 1786 radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
1439 radeon_ring_write(rdev, fence->seq); 1787 radeon_ring_write(rdev, fence->seq);
1788 /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
1789 radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
1790 radeon_ring_write(rdev, RB_INT_STAT);
1440} 1791}
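The emit above now costs five dwords, which is why r600_blit_prepare_copy later in this diff grows each fence reservation from 3 to 5. A sketch of the count, assuming the usual one-dword packet headers:

	/* PACKET3_SET_CONFIG_REG: header + register offset + fence->seq = 3 dwords.
	 * PACKET0(CP_INT_STATUS):  header + RB_INT_STAT                = 2 dwords.
	 * Total per fence emit: 5 dwords (previously 3, without the PACKET0 write).
	 */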
1441 1792
1442int r600_copy_dma(struct radeon_device *rdev, 1793int r600_copy_dma(struct radeon_device *rdev,
@@ -1459,18 +1810,6 @@ int r600_copy_blit(struct radeon_device *rdev,
1459 return 0; 1810 return 0;
1460} 1811}
1461 1812
1462int r600_irq_process(struct radeon_device *rdev)
1463{
1464 /* FIXME: implement */
1465 return 0;
1466}
1467
1468int r600_irq_set(struct radeon_device *rdev)
1469{
1470 /* FIXME: implement */
1471 return 0;
1472}
1473
1474int r600_set_surface_reg(struct radeon_device *rdev, int reg, 1813int r600_set_surface_reg(struct radeon_device *rdev, int reg,
1475 uint32_t tiling_flags, uint32_t pitch, 1814 uint32_t tiling_flags, uint32_t pitch,
1476 uint32_t offset, uint32_t obj_size) 1815 uint32_t offset, uint32_t obj_size)
@@ -1506,6 +1845,14 @@ int r600_startup(struct radeon_device *rdev)
1506{ 1845{
1507 int r; 1846 int r;
1508 1847
1848 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
1849 r = r600_init_microcode(rdev);
1850 if (r) {
1851 DRM_ERROR("Failed to load firmware!\n");
1852 return r;
1853 }
1854 }
1855
1509 r600_mc_program(rdev); 1856 r600_mc_program(rdev);
1510 if (rdev->flags & RADEON_IS_AGP) { 1857 if (rdev->flags & RADEON_IS_AGP) {
1511 r600_agp_enable(rdev); 1858 r600_agp_enable(rdev);
@@ -1516,13 +1863,26 @@ int r600_startup(struct radeon_device *rdev)
1516 } 1863 }
1517 r600_gpu_init(rdev); 1864 r600_gpu_init(rdev);
1518 1865
1519 r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, 1866 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
1520 &rdev->r600_blit.shader_gpu_addr); 1867 if (unlikely(r != 0))
1868 return r;
1869 r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
1870 &rdev->r600_blit.shader_gpu_addr);
1871 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
1521 if (r) { 1872 if (r) {
1522 DRM_ERROR("failed to pin blit object %d\n", r); 1873 dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
1523 return r; 1874 return r;
1524 } 1875 }
1525 1876
1877 /* Enable IRQ */
1878 r = r600_irq_init(rdev);
1879 if (r) {
1880 DRM_ERROR("radeon: IH init failed (%d).\n", r);
1881 radeon_irq_kms_fini(rdev);
1882 return r;
1883 }
1884 r600_irq_set(rdev);
1885
1526 r = radeon_ring_init(rdev, rdev->cp.ring_size); 1886 r = radeon_ring_init(rdev, rdev->cp.ring_size);
1527 if (r) 1887 if (r)
1528 return r; 1888 return r;
@@ -1583,13 +1943,19 @@ int r600_resume(struct radeon_device *rdev)
1583 1943
1584int r600_suspend(struct radeon_device *rdev) 1944int r600_suspend(struct radeon_device *rdev)
1585{ 1945{
1946 int r;
1947
1586 /* FIXME: we should wait for ring to be empty */ 1948 /* FIXME: we should wait for ring to be empty */
1587 r600_cp_stop(rdev); 1949 r600_cp_stop(rdev);
1588 rdev->cp.ready = false; 1950 rdev->cp.ready = false;
1589 r600_wb_disable(rdev); 1951 r600_wb_disable(rdev);
1590 r600_pcie_gart_disable(rdev); 1952 r600_pcie_gart_disable(rdev);
1591 /* unpin shaders bo */ 1953 /* unpin shaders bo */
1592 radeon_object_unpin(rdev->r600_blit.shader_obj); 1954 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
1955 if (unlikely(r != 0))
1956 return r;
1957 radeon_bo_unpin(rdev->r600_blit.shader_obj);
1958 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
1593 return 0; 1959 return 0;
1594} 1960}
1595 1961
@@ -1627,7 +1993,11 @@ int r600_init(struct radeon_device *rdev)
1627 if (r) 1993 if (r)
1628 return r; 1994 return r;
1629 /* Post card if necessary */ 1995 /* Post card if necessary */
1630 if (!r600_card_posted(rdev) && rdev->bios) { 1996 if (!r600_card_posted(rdev)) {
1997 if (!rdev->bios) {
1998 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
1999 return -EINVAL;
2000 }
1631 DRM_INFO("GPU not posted. posting now...\n"); 2001 DRM_INFO("GPU not posted. posting now...\n");
1632 atom_asic_init(rdev->mode_info.atom_context); 2002 atom_asic_init(rdev->mode_info.atom_context);
1633 } 2003 }
@@ -1650,31 +2020,31 @@ int r600_init(struct radeon_device *rdev)
1650 if (r) 2020 if (r)
1651 return r; 2021 return r;
1652 /* Memory manager */ 2022 /* Memory manager */
1653 r = radeon_object_init(rdev); 2023 r = radeon_bo_init(rdev);
1654 if (r) 2024 if (r)
1655 return r; 2025 return r;
2026
2027 r = radeon_irq_kms_init(rdev);
2028 if (r)
2029 return r;
2030
1656 rdev->cp.ring_obj = NULL; 2031 rdev->cp.ring_obj = NULL;
1657 r600_ring_init(rdev, 1024 * 1024); 2032 r600_ring_init(rdev, 1024 * 1024);
1658 2033
1659 if (!rdev->me_fw || !rdev->pfp_fw) { 2034 rdev->ih.ring_obj = NULL;
1660 r = r600_cp_init_microcode(rdev); 2035 r600_ih_ring_init(rdev, 64 * 1024);
1661 if (r) {
1662 DRM_ERROR("Failed to load firmware!\n");
1663 return r;
1664 }
1665 }
1666 2036
1667 r = r600_pcie_gart_init(rdev); 2037 r = r600_pcie_gart_init(rdev);
1668 if (r) 2038 if (r)
1669 return r; 2039 return r;
1670 2040
1671 rdev->accel_working = true;
1672 r = r600_blit_init(rdev); 2041 r = r600_blit_init(rdev);
1673 if (r) { 2042 if (r) {
1674 DRM_ERROR("radeon: failled blitter (%d).\n", r); 2043 DRM_ERROR("radeon: failed blitter (%d).\n", r);
1675 return r; 2044 return r;
1676 } 2045 }
1677 2046
2047 rdev->accel_working = true;
1678 r = r600_startup(rdev); 2048 r = r600_startup(rdev);
1679 if (r) { 2049 if (r) {
1680 r600_suspend(rdev); 2050 r600_suspend(rdev);
@@ -1686,12 +2056,12 @@ int r600_init(struct radeon_device *rdev)
1686 if (rdev->accel_working) { 2056 if (rdev->accel_working) {
1687 r = radeon_ib_pool_init(rdev); 2057 r = radeon_ib_pool_init(rdev);
1688 if (r) { 2058 if (r) {
1689 DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r); 2059 DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
1690 rdev->accel_working = false; 2060 rdev->accel_working = false;
1691 } 2061 }
1692 r = r600_ib_test(rdev); 2062 r = r600_ib_test(rdev);
1693 if (r) { 2063 if (r) {
1694 DRM_ERROR("radeon: failled testing IB (%d).\n", r); 2064 DRM_ERROR("radeon: failed testing IB (%d).\n", r);
1695 rdev->accel_working = false; 2065 rdev->accel_working = false;
1696 } 2066 }
1697 } 2067 }
@@ -1704,6 +2074,8 @@ void r600_fini(struct radeon_device *rdev)
1704 r600_suspend(rdev); 2074 r600_suspend(rdev);
1705 2075
1706 r600_blit_fini(rdev); 2076 r600_blit_fini(rdev);
2077 r600_irq_fini(rdev);
2078 radeon_irq_kms_fini(rdev);
1707 radeon_ring_fini(rdev); 2079 radeon_ring_fini(rdev);
1708 r600_wb_fini(rdev); 2080 r600_wb_fini(rdev);
1709 r600_pcie_gart_fini(rdev); 2081 r600_pcie_gart_fini(rdev);
@@ -1712,7 +2084,7 @@ void r600_fini(struct radeon_device *rdev)
1712 radeon_clocks_fini(rdev); 2084 radeon_clocks_fini(rdev);
1713 if (rdev->flags & RADEON_IS_AGP) 2085 if (rdev->flags & RADEON_IS_AGP)
1714 radeon_agp_fini(rdev); 2086 radeon_agp_fini(rdev);
1715 radeon_object_fini(rdev); 2087 radeon_bo_fini(rdev);
1716 radeon_atombios_fini(rdev); 2088 radeon_atombios_fini(rdev);
1717 kfree(rdev->bios); 2089 kfree(rdev->bios);
1718 rdev->bios = NULL; 2090 rdev->bios = NULL;
@@ -1798,8 +2170,657 @@ int r600_ib_test(struct radeon_device *rdev)
1798 return r; 2170 return r;
1799} 2171}
1800 2172
2173/*
2174 * Interrupts
2175 *
 2176 * Interrupts use a ring buffer on r6xx/r7xx hardware. It works much
 2177 * the same as the CP ring buffer, but in reverse: rather than the CPU
 2178 * writing to the ring and the GPU consuming, the GPU writes to the ring
 2179 * and the host consumes. As the host irq handler processes interrupts, it
 2180 * increments the rptr. When the rptr catches up with the wptr, all the
2181 * current interrupts have been processed.
2182 */
2183
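Before the implementation, a minimal sketch of the host-side handshake described above; the 16-byte entry size and byte-based pointers match r600_irq_process() below, and handle_entry() is a hypothetical stand-in for the switch on src_id:

	/* Sketch only: consume GPU-written IH ring entries until rptr meets wptr. */
	while (rptr != wptr) {
		handle_entry(&ih_ring[rptr / 4]);	/* ih_ring is u32 *, pointers are in bytes */
		rptr = (rptr + 16) % ring_size;		/* 128-bit entries, wrap at ring end */
	}
	WREG32(IH_RB_RPTR, rptr);			/* tell the GPU how far we got */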
2184void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
2185{
2186 u32 rb_bufsz;
2187
2188 /* Align ring size */
2189 rb_bufsz = drm_order(ring_size / 4);
2190 ring_size = (1 << rb_bufsz) * 4;
2191 rdev->ih.ring_size = ring_size;
2192 rdev->ih.align_mask = 4 - 1;
2193}
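A worked example of the rounding above, assuming drm_order(x) returns the smallest n with (1 << n) >= x:

	/* Requested 64 KiB (as in r600_init below):
	 *   rb_bufsz  = drm_order(65536 / 4) = drm_order(16384) = 14
	 *   ring_size = (1 << 14) * 4 = 65536 bytes (already a power of two)
	 * A 48 KiB request would round up: drm_order(12288) = 14 -> 64 KiB.
	 */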
2194
2195static int r600_ih_ring_alloc(struct radeon_device *rdev, unsigned ring_size)
2196{
2197 int r;
2198
2199 rdev->ih.ring_size = ring_size;
2200 /* Allocate ring buffer */
2201 if (rdev->ih.ring_obj == NULL) {
2202 r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size,
2203 true,
2204 RADEON_GEM_DOMAIN_GTT,
2205 &rdev->ih.ring_obj);
2206 if (r) {
2207 DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
2208 return r;
2209 }
2210 r = radeon_bo_reserve(rdev->ih.ring_obj, false);
2211 if (unlikely(r != 0))
2212 return r;
2213 r = radeon_bo_pin(rdev->ih.ring_obj,
2214 RADEON_GEM_DOMAIN_GTT,
2215 &rdev->ih.gpu_addr);
2216 if (r) {
2217 radeon_bo_unreserve(rdev->ih.ring_obj);
2218 DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
2219 return r;
2220 }
2221 r = radeon_bo_kmap(rdev->ih.ring_obj,
2222 (void **)&rdev->ih.ring);
2223 radeon_bo_unreserve(rdev->ih.ring_obj);
2224 if (r) {
2225 DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
2226 return r;
2227 }
2228 }
 2229	rdev->ih.ptr_mask = (rdev->ih.ring_size / 4) - 1;
2230 rdev->ih.rptr = 0;
2231
2232 return 0;
2233}
2234
2235static void r600_ih_ring_fini(struct radeon_device *rdev)
2236{
2237 int r;
2238 if (rdev->ih.ring_obj) {
2239 r = radeon_bo_reserve(rdev->ih.ring_obj, false);
2240 if (likely(r == 0)) {
2241 radeon_bo_kunmap(rdev->ih.ring_obj);
2242 radeon_bo_unpin(rdev->ih.ring_obj);
2243 radeon_bo_unreserve(rdev->ih.ring_obj);
2244 }
2245 radeon_bo_unref(&rdev->ih.ring_obj);
2246 rdev->ih.ring = NULL;
2247 rdev->ih.ring_obj = NULL;
2248 }
2249}
2250
2251static void r600_rlc_stop(struct radeon_device *rdev)
2252{
2253
2254 if (rdev->family >= CHIP_RV770) {
2255 /* r7xx asics need to soft reset RLC before halting */
2256 WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
2257 RREG32(SRBM_SOFT_RESET);
2258 udelay(15000);
2259 WREG32(SRBM_SOFT_RESET, 0);
2260 RREG32(SRBM_SOFT_RESET);
2261 }
2262
2263 WREG32(RLC_CNTL, 0);
2264}
2265
2266static void r600_rlc_start(struct radeon_device *rdev)
2267{
2268 WREG32(RLC_CNTL, RLC_ENABLE);
2269}
2270
2271static int r600_rlc_init(struct radeon_device *rdev)
2272{
2273 u32 i;
2274 const __be32 *fw_data;
2275
2276 if (!rdev->rlc_fw)
2277 return -EINVAL;
2278
2279 r600_rlc_stop(rdev);
2280
2281 WREG32(RLC_HB_BASE, 0);
2282 WREG32(RLC_HB_CNTL, 0);
2283 WREG32(RLC_HB_RPTR, 0);
2284 WREG32(RLC_HB_WPTR, 0);
2285 WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
2286 WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
2287 WREG32(RLC_MC_CNTL, 0);
2288 WREG32(RLC_UCODE_CNTL, 0);
2289
2290 fw_data = (const __be32 *)rdev->rlc_fw->data;
2291 if (rdev->family >= CHIP_RV770) {
2292 for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
2293 WREG32(RLC_UCODE_ADDR, i);
2294 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2295 }
2296 } else {
2297 for (i = 0; i < RLC_UCODE_SIZE; i++) {
2298 WREG32(RLC_UCODE_ADDR, i);
2299 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2300 }
2301 }
2302 WREG32(RLC_UCODE_ADDR, 0);
2303
2304 r600_rlc_start(rdev);
2305
2306 return 0;
2307}
2308
2309static void r600_enable_interrupts(struct radeon_device *rdev)
2310{
2311 u32 ih_cntl = RREG32(IH_CNTL);
2312 u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
2313
2314 ih_cntl |= ENABLE_INTR;
2315 ih_rb_cntl |= IH_RB_ENABLE;
2316 WREG32(IH_CNTL, ih_cntl);
2317 WREG32(IH_RB_CNTL, ih_rb_cntl);
2318 rdev->ih.enabled = true;
2319}
2320
2321static void r600_disable_interrupts(struct radeon_device *rdev)
2322{
2323 u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
2324 u32 ih_cntl = RREG32(IH_CNTL);
2325
2326 ih_rb_cntl &= ~IH_RB_ENABLE;
2327 ih_cntl &= ~ENABLE_INTR;
2328 WREG32(IH_RB_CNTL, ih_rb_cntl);
2329 WREG32(IH_CNTL, ih_cntl);
2330 /* set rptr, wptr to 0 */
2331 WREG32(IH_RB_RPTR, 0);
2332 WREG32(IH_RB_WPTR, 0);
2333 rdev->ih.enabled = false;
2334 rdev->ih.wptr = 0;
2335 rdev->ih.rptr = 0;
2336}
2337
2338static void r600_disable_interrupt_state(struct radeon_device *rdev)
2339{
2340 u32 tmp;
2341
2342 WREG32(CP_INT_CNTL, 0);
2343 WREG32(GRBM_INT_CNTL, 0);
2344 WREG32(DxMODE_INT_MASK, 0);
2345 if (ASIC_IS_DCE3(rdev)) {
2346 WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
2347 WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
2348 tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2349 WREG32(DC_HPD1_INT_CONTROL, tmp);
2350 tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2351 WREG32(DC_HPD2_INT_CONTROL, tmp);
2352 tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2353 WREG32(DC_HPD3_INT_CONTROL, tmp);
2354 tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2355 WREG32(DC_HPD4_INT_CONTROL, tmp);
2356 if (ASIC_IS_DCE32(rdev)) {
2357 tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
 2358		WREG32(DC_HPD5_INT_CONTROL, tmp);
 2359		tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
 2360		WREG32(DC_HPD6_INT_CONTROL, tmp);
2361 }
2362 } else {
2363 WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
2364 WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
2365 tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
 2366	WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
 2367	tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
 2368	WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
 2369	tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
 2370	WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
2371 }
2372}
2373
2374int r600_irq_init(struct radeon_device *rdev)
2375{
2376 int ret = 0;
2377 int rb_bufsz;
2378 u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
2379
2380 /* allocate ring */
2381 ret = r600_ih_ring_alloc(rdev, rdev->ih.ring_size);
2382 if (ret)
2383 return ret;
2384
2385 /* disable irqs */
2386 r600_disable_interrupts(rdev);
2387
2388 /* init rlc */
2389 ret = r600_rlc_init(rdev);
2390 if (ret) {
2391 r600_ih_ring_fini(rdev);
2392 return ret;
2393 }
2394
2395 /* setup interrupt control */
2396 /* set dummy read address to ring address */
2397 WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
2398 interrupt_cntl = RREG32(INTERRUPT_CNTL);
2399 /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
2400 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
2401 */
2402 interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
2403 /* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
2404 interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
2405 WREG32(INTERRUPT_CNTL, interrupt_cntl);
2406
2407 WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
2408 rb_bufsz = drm_order(rdev->ih.ring_size / 4);
2409
2410 ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
2411 IH_WPTR_OVERFLOW_CLEAR |
2412 (rb_bufsz << 1));
2413 /* WPTR writeback, not yet */
2414 /*ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;*/
2415 WREG32(IH_RB_WPTR_ADDR_LO, 0);
2416 WREG32(IH_RB_WPTR_ADDR_HI, 0);
2417
2418 WREG32(IH_RB_CNTL, ih_rb_cntl);
2419
2420 /* set rptr, wptr to 0 */
2421 WREG32(IH_RB_RPTR, 0);
2422 WREG32(IH_RB_WPTR, 0);
2423
2424 /* Default settings for IH_CNTL (disabled at first) */
2425 ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10);
2426 /* RPTR_REARM only works if msi's are enabled */
2427 if (rdev->msi_enabled)
2428 ih_cntl |= RPTR_REARM;
2429
2430#ifdef __BIG_ENDIAN
2431 ih_cntl |= IH_MC_SWAP(IH_MC_SWAP_32BIT);
2432#endif
2433 WREG32(IH_CNTL, ih_cntl);
2434
2435 /* force the active interrupt state to all disabled */
2436 r600_disable_interrupt_state(rdev);
2437
2438 /* enable irqs */
2439 r600_enable_interrupts(rdev);
2440
2441 return ret;
2442}
2443
2444void r600_irq_fini(struct radeon_device *rdev)
2445{
2446 r600_disable_interrupts(rdev);
2447 r600_rlc_stop(rdev);
2448 r600_ih_ring_fini(rdev);
2449}
2450
2451int r600_irq_set(struct radeon_device *rdev)
2452{
2453 u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
2454 u32 mode_int = 0;
2455 u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
2456
2457 /* don't enable anything if the ih is disabled */
2458 if (!rdev->ih.enabled)
2459 return 0;
2460
2461 if (ASIC_IS_DCE3(rdev)) {
2462 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
2463 hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
2464 hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
2465 hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
2466 if (ASIC_IS_DCE32(rdev)) {
2467 hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
2468 hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
2469 }
2470 } else {
2471 hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
2472 hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
2473 hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
2474 }
2475
2476 if (rdev->irq.sw_int) {
2477 DRM_DEBUG("r600_irq_set: sw int\n");
2478 cp_int_cntl |= RB_INT_ENABLE;
2479 }
2480 if (rdev->irq.crtc_vblank_int[0]) {
2481 DRM_DEBUG("r600_irq_set: vblank 0\n");
2482 mode_int |= D1MODE_VBLANK_INT_MASK;
2483 }
2484 if (rdev->irq.crtc_vblank_int[1]) {
2485 DRM_DEBUG("r600_irq_set: vblank 1\n");
2486 mode_int |= D2MODE_VBLANK_INT_MASK;
2487 }
2488 if (rdev->irq.hpd[0]) {
2489 DRM_DEBUG("r600_irq_set: hpd 1\n");
2490 hpd1 |= DC_HPDx_INT_EN;
2491 }
2492 if (rdev->irq.hpd[1]) {
2493 DRM_DEBUG("r600_irq_set: hpd 2\n");
2494 hpd2 |= DC_HPDx_INT_EN;
2495 }
2496 if (rdev->irq.hpd[2]) {
2497 DRM_DEBUG("r600_irq_set: hpd 3\n");
2498 hpd3 |= DC_HPDx_INT_EN;
2499 }
2500 if (rdev->irq.hpd[3]) {
2501 DRM_DEBUG("r600_irq_set: hpd 4\n");
2502 hpd4 |= DC_HPDx_INT_EN;
2503 }
2504 if (rdev->irq.hpd[4]) {
2505 DRM_DEBUG("r600_irq_set: hpd 5\n");
2506 hpd5 |= DC_HPDx_INT_EN;
2507 }
2508 if (rdev->irq.hpd[5]) {
2509 DRM_DEBUG("r600_irq_set: hpd 6\n");
2510 hpd6 |= DC_HPDx_INT_EN;
2511 }
2512
2513 WREG32(CP_INT_CNTL, cp_int_cntl);
2514 WREG32(DxMODE_INT_MASK, mode_int);
2515 if (ASIC_IS_DCE3(rdev)) {
2516 WREG32(DC_HPD1_INT_CONTROL, hpd1);
2517 WREG32(DC_HPD2_INT_CONTROL, hpd2);
2518 WREG32(DC_HPD3_INT_CONTROL, hpd3);
2519 WREG32(DC_HPD4_INT_CONTROL, hpd4);
2520 if (ASIC_IS_DCE32(rdev)) {
2521 WREG32(DC_HPD5_INT_CONTROL, hpd5);
2522 WREG32(DC_HPD6_INT_CONTROL, hpd6);
2523 }
2524 } else {
2525 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
2526 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
2527 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
2528 }
2529
2530 return 0;
2531}
2532
2533static inline void r600_irq_ack(struct radeon_device *rdev,
2534 u32 *disp_int,
2535 u32 *disp_int_cont,
2536 u32 *disp_int_cont2)
2537{
2538 u32 tmp;
2539
2540 if (ASIC_IS_DCE3(rdev)) {
2541 *disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
2542 *disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
2543 *disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
2544 } else {
2545 *disp_int = RREG32(DISP_INTERRUPT_STATUS);
2546 *disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
2547 *disp_int_cont2 = 0;
2548 }
2549
2550 if (*disp_int & LB_D1_VBLANK_INTERRUPT)
2551 WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
2552 if (*disp_int & LB_D1_VLINE_INTERRUPT)
2553 WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
2554 if (*disp_int & LB_D2_VBLANK_INTERRUPT)
2555 WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
2556 if (*disp_int & LB_D2_VLINE_INTERRUPT)
2557 WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
2558 if (*disp_int & DC_HPD1_INTERRUPT) {
2559 if (ASIC_IS_DCE3(rdev)) {
2560 tmp = RREG32(DC_HPD1_INT_CONTROL);
2561 tmp |= DC_HPDx_INT_ACK;
2562 WREG32(DC_HPD1_INT_CONTROL, tmp);
2563 } else {
2564 tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
2565 tmp |= DC_HPDx_INT_ACK;
2566 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
2567 }
2568 }
2569 if (*disp_int & DC_HPD2_INTERRUPT) {
2570 if (ASIC_IS_DCE3(rdev)) {
2571 tmp = RREG32(DC_HPD2_INT_CONTROL);
2572 tmp |= DC_HPDx_INT_ACK;
2573 WREG32(DC_HPD2_INT_CONTROL, tmp);
2574 } else {
2575 tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
2576 tmp |= DC_HPDx_INT_ACK;
2577 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
2578 }
2579 }
2580 if (*disp_int_cont & DC_HPD3_INTERRUPT) {
2581 if (ASIC_IS_DCE3(rdev)) {
2582 tmp = RREG32(DC_HPD3_INT_CONTROL);
2583 tmp |= DC_HPDx_INT_ACK;
2584 WREG32(DC_HPD3_INT_CONTROL, tmp);
2585 } else {
2586 tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
2587 tmp |= DC_HPDx_INT_ACK;
2588 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
2589 }
2590 }
2591 if (*disp_int_cont & DC_HPD4_INTERRUPT) {
2592 tmp = RREG32(DC_HPD4_INT_CONTROL);
2593 tmp |= DC_HPDx_INT_ACK;
2594 WREG32(DC_HPD4_INT_CONTROL, tmp);
2595 }
2596 if (ASIC_IS_DCE32(rdev)) {
2597 if (*disp_int_cont2 & DC_HPD5_INTERRUPT) {
2598 tmp = RREG32(DC_HPD5_INT_CONTROL);
2599 tmp |= DC_HPDx_INT_ACK;
2600 WREG32(DC_HPD5_INT_CONTROL, tmp);
2601 }
2602 if (*disp_int_cont2 & DC_HPD6_INTERRUPT) {
 2603		tmp = RREG32(DC_HPD6_INT_CONTROL);
2604 tmp |= DC_HPDx_INT_ACK;
2605 WREG32(DC_HPD6_INT_CONTROL, tmp);
2606 }
2607 }
2608}
2609
2610void r600_irq_disable(struct radeon_device *rdev)
2611{
2612 u32 disp_int, disp_int_cont, disp_int_cont2;
2613
2614 r600_disable_interrupts(rdev);
2615 /* Wait and acknowledge irq */
2616 mdelay(1);
2617 r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);
2618 r600_disable_interrupt_state(rdev);
2619}
2620
2621static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
2622{
2623 u32 wptr, tmp;
1801 2624
2625 /* XXX use writeback */
2626 wptr = RREG32(IH_RB_WPTR);
1802 2627
2628 if (wptr & RB_OVERFLOW) {
2629 WARN_ON(1);
2630 /* XXX deal with overflow */
2631 DRM_ERROR("IH RB overflow\n");
2632 tmp = RREG32(IH_RB_CNTL);
2633 tmp |= IH_WPTR_OVERFLOW_CLEAR;
2634 WREG32(IH_RB_CNTL, tmp);
2635 }
2636 wptr = wptr & WPTR_OFFSET_MASK;
2637
2638 return wptr;
2639}
2640
2641/* r600 IV Ring
2642 * Each IV ring entry is 128 bits:
2643 * [7:0] - interrupt source id
2644 * [31:8] - reserved
2645 * [59:32] - interrupt source data
2646 * [127:60] - reserved
2647 *
2648 * The basic interrupt vector entries
2649 * are decoded as follows:
2650 * src_id src_data description
2651 * 1 0 D1 Vblank
2652 * 1 1 D1 Vline
2653 * 5 0 D2 Vblank
2654 * 5 1 D2 Vline
2655 * 19 0 FP Hot plug detection A
2656 * 19 1 FP Hot plug detection B
2657 * 19 2 DAC A auto-detection
2658 * 19 3 DAC B auto-detection
2659 * 176 - CP_INT RB
2660 * 177 - CP_INT IB1
2661 * 178 - CP_INT IB2
2662 * 181 - EOP Interrupt
2663 * 233 - GUI Idle
2664 *
2665 * Note, these are based on r600 and may need to be
2666 * adjusted or added to on newer asics
2667 */
2668
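Given that layout, extracting the two live fields from an entry is a pair of masked loads; a minimal sketch mirroring the loop in r600_irq_process() below:

	/* Decode one 128-bit IV entry at byte offset rptr; the ring is mapped
	 * as u32 and only dwords 0 and 1 carry data in this format.
	 */
	u32 ring_index = rptr / 4;
	u32 src_id   = ih_ring[ring_index] & 0xff;          /* bits [7:0]   */
	u32 src_data = ih_ring[ring_index + 1] & 0xfffffff; /* bits [59:32] */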
2669int r600_irq_process(struct radeon_device *rdev)
2670{
2671 u32 wptr = r600_get_ih_wptr(rdev);
2672 u32 rptr = rdev->ih.rptr;
2673 u32 src_id, src_data;
2674 u32 last_entry = rdev->ih.ring_size - 16;
2675 u32 ring_index, disp_int, disp_int_cont, disp_int_cont2;
2676 unsigned long flags;
2677 bool queue_hotplug = false;
2678
2679 DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
2680
2681 spin_lock_irqsave(&rdev->ih.lock, flags);
2682
2683 if (rptr == wptr) {
2684 spin_unlock_irqrestore(&rdev->ih.lock, flags);
2685 return IRQ_NONE;
2686 }
2687 if (rdev->shutdown) {
2688 spin_unlock_irqrestore(&rdev->ih.lock, flags);
2689 return IRQ_NONE;
2690 }
2691
2692restart_ih:
2693 /* display interrupts */
2694 r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);
2695
2696 rdev->ih.wptr = wptr;
2697 while (rptr != wptr) {
2698 /* wptr/rptr are in bytes! */
2699 ring_index = rptr / 4;
2700 src_id = rdev->ih.ring[ring_index] & 0xff;
2701 src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff;
2702
2703 switch (src_id) {
2704 case 1: /* D1 vblank/vline */
2705 switch (src_data) {
2706 case 0: /* D1 vblank */
2707 if (disp_int & LB_D1_VBLANK_INTERRUPT) {
2708 drm_handle_vblank(rdev->ddev, 0);
2709 disp_int &= ~LB_D1_VBLANK_INTERRUPT;
2710 DRM_DEBUG("IH: D1 vblank\n");
2711 }
2712 break;
2713 case 1: /* D1 vline */
2714 if (disp_int & LB_D1_VLINE_INTERRUPT) {
2715 disp_int &= ~LB_D1_VLINE_INTERRUPT;
2716 DRM_DEBUG("IH: D1 vline\n");
2717 }
2718 break;
2719 default:
2720 DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
2721 break;
2722 }
2723 break;
2724 case 5: /* D2 vblank/vline */
2725 switch (src_data) {
2726 case 0: /* D2 vblank */
2727 if (disp_int & LB_D2_VBLANK_INTERRUPT) {
2728 drm_handle_vblank(rdev->ddev, 1);
2729 disp_int &= ~LB_D2_VBLANK_INTERRUPT;
2730 DRM_DEBUG("IH: D2 vblank\n");
2731 }
2732 break;
 2733		case 1: /* D2 vline */
2734 if (disp_int & LB_D2_VLINE_INTERRUPT) {
2735 disp_int &= ~LB_D2_VLINE_INTERRUPT;
2736 DRM_DEBUG("IH: D2 vline\n");
2737 }
2738 break;
2739 default:
2740 DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
2741 break;
2742 }
2743 break;
2744 case 19: /* HPD/DAC hotplug */
2745 switch (src_data) {
2746 case 0:
2747 if (disp_int & DC_HPD1_INTERRUPT) {
2748 disp_int &= ~DC_HPD1_INTERRUPT;
2749 queue_hotplug = true;
2750 DRM_DEBUG("IH: HPD1\n");
2751 }
2752 break;
2753 case 1:
2754 if (disp_int & DC_HPD2_INTERRUPT) {
2755 disp_int &= ~DC_HPD2_INTERRUPT;
2756 queue_hotplug = true;
2757 DRM_DEBUG("IH: HPD2\n");
2758 }
2759 break;
2760 case 4:
2761 if (disp_int_cont & DC_HPD3_INTERRUPT) {
2762 disp_int_cont &= ~DC_HPD3_INTERRUPT;
2763 queue_hotplug = true;
2764 DRM_DEBUG("IH: HPD3\n");
2765 }
2766 break;
2767 case 5:
2768 if (disp_int_cont & DC_HPD4_INTERRUPT) {
2769 disp_int_cont &= ~DC_HPD4_INTERRUPT;
2770 queue_hotplug = true;
2771 DRM_DEBUG("IH: HPD4\n");
2772 }
2773 break;
2774 case 10:
2775 if (disp_int_cont2 & DC_HPD5_INTERRUPT) {
 2776				disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
2777 queue_hotplug = true;
2778 DRM_DEBUG("IH: HPD5\n");
2779 }
2780 break;
2781 case 12:
2782 if (disp_int_cont2 & DC_HPD6_INTERRUPT) {
 2783				disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
2784 queue_hotplug = true;
2785 DRM_DEBUG("IH: HPD6\n");
2786 }
2787 break;
2788 default:
2789 DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
2790 break;
2791 }
2792 break;
2793 case 176: /* CP_INT in ring buffer */
2794 case 177: /* CP_INT in IB1 */
2795 case 178: /* CP_INT in IB2 */
2796 DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
2797 radeon_fence_process(rdev);
2798 break;
2799 case 181: /* CP EOP event */
2800 DRM_DEBUG("IH: CP EOP\n");
2801 break;
2802 default:
2803 DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
2804 break;
2805 }
2806
2807 /* wptr/rptr are in bytes! */
2808 if (rptr == last_entry)
2809 rptr = 0;
2810 else
2811 rptr += 16;
2812 }
2813 /* make sure wptr hasn't changed while processing */
2814 wptr = r600_get_ih_wptr(rdev);
2815 if (wptr != rdev->ih.wptr)
2816 goto restart_ih;
2817 if (queue_hotplug)
2818 queue_work(rdev->wq, &rdev->hotplug_work);
2819 rdev->ih.rptr = rptr;
2820 WREG32(IH_RB_RPTR, rdev->ih.rptr);
2821 spin_unlock_irqrestore(&rdev->ih.lock, flags);
2822 return IRQ_HANDLED;
2823}
1803 2824
1804/* 2825/*
1805 * Debugfs info 2826 * Debugfs info
@@ -1811,21 +2832,21 @@ static int r600_debugfs_cp_ring_info(struct seq_file *m, void *data)
1811 struct drm_info_node *node = (struct drm_info_node *) m->private; 2832 struct drm_info_node *node = (struct drm_info_node *) m->private;
1812 struct drm_device *dev = node->minor->dev; 2833 struct drm_device *dev = node->minor->dev;
1813 struct radeon_device *rdev = dev->dev_private; 2834 struct radeon_device *rdev = dev->dev_private;
1814 uint32_t rdp, wdp;
1815 unsigned count, i, j; 2835 unsigned count, i, j;
1816 2836
1817 radeon_ring_free_size(rdev); 2837 radeon_ring_free_size(rdev);
1818 rdp = RREG32(CP_RB_RPTR); 2838 count = (rdev->cp.ring_size / 4) - rdev->cp.ring_free_dw;
1819 wdp = RREG32(CP_RB_WPTR);
1820 count = (rdp + rdev->cp.ring_size - wdp) & rdev->cp.ptr_mask;
1821 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT)); 2839 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT));
1822 seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp); 2840 seq_printf(m, "CP_RB_WPTR 0x%08x\n", RREG32(CP_RB_WPTR));
1823 seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp); 2841 seq_printf(m, "CP_RB_RPTR 0x%08x\n", RREG32(CP_RB_RPTR));
2842 seq_printf(m, "driver's copy of the CP_RB_WPTR 0x%08x\n", rdev->cp.wptr);
2843 seq_printf(m, "driver's copy of the CP_RB_RPTR 0x%08x\n", rdev->cp.rptr);
1824 seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw); 2844 seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
1825 seq_printf(m, "%u dwords in ring\n", count); 2845 seq_printf(m, "%u dwords in ring\n", count);
2846 i = rdev->cp.rptr;
1826 for (j = 0; j <= count; j++) { 2847 for (j = 0; j <= count; j++) {
1827 i = (rdp + j) & rdev->cp.ptr_mask;
1828 seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]); 2848 seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
2849 i = (i + 1) & rdev->cp.ptr_mask;
1829 } 2850 }
1830 return 0; 2851 return 0;
1831} 2852}
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index dbf716e1fbf3..9aecafb51b66 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -473,9 +473,8 @@ int r600_blit_init(struct radeon_device *rdev)
473 obj_size += r6xx_ps_size * 4; 473 obj_size += r6xx_ps_size * 4;
474 obj_size = ALIGN(obj_size, 256); 474 obj_size = ALIGN(obj_size, 256);
475 475
476 r = radeon_object_create(rdev, NULL, obj_size, 476 r = radeon_bo_create(rdev, NULL, obj_size, true, RADEON_GEM_DOMAIN_VRAM,
477 true, RADEON_GEM_DOMAIN_VRAM, 477 &rdev->r600_blit.shader_obj);
478 false, &rdev->r600_blit.shader_obj);
479 if (r) { 478 if (r) {
480 DRM_ERROR("r600 failed to allocate shader\n"); 479 DRM_ERROR("r600 failed to allocate shader\n");
481 return r; 480 return r;
@@ -485,12 +484,14 @@ int r600_blit_init(struct radeon_device *rdev)
485 obj_size, 484 obj_size,
486 rdev->r600_blit.vs_offset, rdev->r600_blit.ps_offset); 485 rdev->r600_blit.vs_offset, rdev->r600_blit.ps_offset);
487 486
488 r = radeon_object_kmap(rdev->r600_blit.shader_obj, &ptr); 487 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
488 if (unlikely(r != 0))
489 return r;
490 r = radeon_bo_kmap(rdev->r600_blit.shader_obj, &ptr);
489 if (r) { 491 if (r) {
490 DRM_ERROR("failed to map blit object %d\n", r); 492 DRM_ERROR("failed to map blit object %d\n", r);
491 return r; 493 return r;
492 } 494 }
493
494 if (rdev->family >= CHIP_RV770) 495 if (rdev->family >= CHIP_RV770)
495 memcpy_toio(ptr + rdev->r600_blit.state_offset, 496 memcpy_toio(ptr + rdev->r600_blit.state_offset,
496 r7xx_default_state, rdev->r600_blit.state_len * 4); 497 r7xx_default_state, rdev->r600_blit.state_len * 4);
@@ -500,19 +501,26 @@ int r600_blit_init(struct radeon_device *rdev)
500 if (num_packet2s) 501 if (num_packet2s)
501 memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4), 502 memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
502 packet2s, num_packet2s * 4); 503 packet2s, num_packet2s * 4);
503
504
505 memcpy(ptr + rdev->r600_blit.vs_offset, r6xx_vs, r6xx_vs_size * 4); 504 memcpy(ptr + rdev->r600_blit.vs_offset, r6xx_vs, r6xx_vs_size * 4);
506 memcpy(ptr + rdev->r600_blit.ps_offset, r6xx_ps, r6xx_ps_size * 4); 505 memcpy(ptr + rdev->r600_blit.ps_offset, r6xx_ps, r6xx_ps_size * 4);
507 506 radeon_bo_kunmap(rdev->r600_blit.shader_obj);
508 radeon_object_kunmap(rdev->r600_blit.shader_obj); 507 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
509 return 0; 508 return 0;
510} 509}
511 510
512void r600_blit_fini(struct radeon_device *rdev) 511void r600_blit_fini(struct radeon_device *rdev)
513{ 512{
514 radeon_object_unpin(rdev->r600_blit.shader_obj); 513 int r;
515 radeon_object_unref(&rdev->r600_blit.shader_obj); 514
515 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
516 if (unlikely(r != 0)) {
517 dev_err(rdev->dev, "(%d) can't finish r600 blit\n", r);
518 goto out_unref;
519 }
520 radeon_bo_unpin(rdev->r600_blit.shader_obj);
521 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
522out_unref:
523 radeon_bo_unref(&rdev->r600_blit.shader_obj);
516} 524}
517 525
518int r600_vb_ib_get(struct radeon_device *rdev) 526int r600_vb_ib_get(struct radeon_device *rdev)
@@ -569,9 +577,9 @@ int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
569 ring_size = num_loops * dwords_per_loop; 577 ring_size = num_loops * dwords_per_loop;
570 /* set default + shaders */ 578 /* set default + shaders */
571 ring_size += 40; /* shaders + def state */ 579 ring_size += 40; /* shaders + def state */
572 ring_size += 3; /* fence emit for VB IB */ 580 ring_size += 5; /* fence emit for VB IB */
573 ring_size += 5; /* done copy */ 581 ring_size += 5; /* done copy */
574 ring_size += 3; /* fence emit for done copy */ 582 ring_size += 5; /* fence emit for done copy */
575 r = radeon_ring_lock(rdev, ring_size); 583 r = radeon_ring_lock(rdev, ring_size);
576 WARN_ON(r); 584 WARN_ON(r);
577 585
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 27ab428b149b..05894edadab4 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -456,7 +456,215 @@
456#define WAIT_2D_IDLECLEAN_bit (1 << 16) 456#define WAIT_2D_IDLECLEAN_bit (1 << 16)
457#define WAIT_3D_IDLECLEAN_bit (1 << 17) 457#define WAIT_3D_IDLECLEAN_bit (1 << 17)
458 458
459 459#define IH_RB_CNTL 0x3e00
460# define IH_RB_ENABLE (1 << 0)
461# define IH_IB_SIZE(x) ((x) << 1) /* log2 */
462# define IH_RB_FULL_DRAIN_ENABLE (1 << 6)
463# define IH_WPTR_WRITEBACK_ENABLE (1 << 8)
464# define IH_WPTR_WRITEBACK_TIMER(x) ((x) << 9) /* log2 */
465# define IH_WPTR_OVERFLOW_ENABLE (1 << 16)
466# define IH_WPTR_OVERFLOW_CLEAR (1 << 31)
467#define IH_RB_BASE 0x3e04
468#define IH_RB_RPTR 0x3e08
469#define IH_RB_WPTR 0x3e0c
470# define RB_OVERFLOW (1 << 0)
471# define WPTR_OFFSET_MASK 0x3fffc
472#define IH_RB_WPTR_ADDR_HI 0x3e10
473#define IH_RB_WPTR_ADDR_LO 0x3e14
474#define IH_CNTL 0x3e18
475# define ENABLE_INTR (1 << 0)
476# define IH_MC_SWAP(x) ((x) << 2)
477# define IH_MC_SWAP_NONE 0
478# define IH_MC_SWAP_16BIT 1
479# define IH_MC_SWAP_32BIT 2
480# define IH_MC_SWAP_64BIT 3
481# define RPTR_REARM (1 << 4)
482# define MC_WRREQ_CREDIT(x) ((x) << 15)
483# define MC_WR_CLEAN_CNT(x) ((x) << 20)
484
485#define RLC_CNTL 0x3f00
486# define RLC_ENABLE (1 << 0)
487#define RLC_HB_BASE 0x3f10
488#define RLC_HB_CNTL 0x3f0c
489#define RLC_HB_RPTR 0x3f20
490#define RLC_HB_WPTR 0x3f1c
491#define RLC_HB_WPTR_LSB_ADDR 0x3f14
492#define RLC_HB_WPTR_MSB_ADDR 0x3f18
493#define RLC_MC_CNTL 0x3f44
494#define RLC_UCODE_CNTL 0x3f48
495#define RLC_UCODE_ADDR 0x3f2c
496#define RLC_UCODE_DATA 0x3f30
497
498#define SRBM_SOFT_RESET 0xe60
499# define SOFT_RESET_RLC (1 << 13)
500
501#define CP_INT_CNTL 0xc124
502# define CNTX_BUSY_INT_ENABLE (1 << 19)
503# define CNTX_EMPTY_INT_ENABLE (1 << 20)
504# define SCRATCH_INT_ENABLE (1 << 25)
505# define TIME_STAMP_INT_ENABLE (1 << 26)
506# define IB2_INT_ENABLE (1 << 29)
507# define IB1_INT_ENABLE (1 << 30)
508# define RB_INT_ENABLE (1 << 31)
509#define CP_INT_STATUS 0xc128
510# define SCRATCH_INT_STAT (1 << 25)
511# define TIME_STAMP_INT_STAT (1 << 26)
512# define IB2_INT_STAT (1 << 29)
513# define IB1_INT_STAT (1 << 30)
514# define RB_INT_STAT (1 << 31)
515
516#define GRBM_INT_CNTL 0x8060
517# define RDERR_INT_ENABLE (1 << 0)
518# define WAIT_COUNT_TIMEOUT_INT_ENABLE (1 << 1)
519# define GUI_IDLE_INT_ENABLE (1 << 19)
520
521#define INTERRUPT_CNTL 0x5468
522# define IH_DUMMY_RD_OVERRIDE (1 << 0)
523# define IH_DUMMY_RD_EN (1 << 1)
524# define IH_REQ_NONSNOOP_EN (1 << 3)
525# define GEN_IH_INT_EN (1 << 8)
526#define INTERRUPT_CNTL2 0x546c
527
528#define D1MODE_VBLANK_STATUS 0x6534
529#define D2MODE_VBLANK_STATUS 0x6d34
530# define DxMODE_VBLANK_OCCURRED (1 << 0)
531# define DxMODE_VBLANK_ACK (1 << 4)
532# define DxMODE_VBLANK_STAT (1 << 12)
533# define DxMODE_VBLANK_INTERRUPT (1 << 16)
534# define DxMODE_VBLANK_INTERRUPT_TYPE (1 << 17)
535#define D1MODE_VLINE_STATUS 0x653c
536#define D2MODE_VLINE_STATUS 0x6d3c
537# define DxMODE_VLINE_OCCURRED (1 << 0)
538# define DxMODE_VLINE_ACK (1 << 4)
539# define DxMODE_VLINE_STAT (1 << 12)
540# define DxMODE_VLINE_INTERRUPT (1 << 16)
541# define DxMODE_VLINE_INTERRUPT_TYPE (1 << 17)
542#define DxMODE_INT_MASK 0x6540
543# define D1MODE_VBLANK_INT_MASK (1 << 0)
544# define D1MODE_VLINE_INT_MASK (1 << 4)
545# define D2MODE_VBLANK_INT_MASK (1 << 8)
546# define D2MODE_VLINE_INT_MASK (1 << 12)
547#define DCE3_DISP_INTERRUPT_STATUS 0x7ddc
548# define DC_HPD1_INTERRUPT (1 << 18)
549# define DC_HPD2_INTERRUPT (1 << 19)
550#define DISP_INTERRUPT_STATUS 0x7edc
551# define LB_D1_VLINE_INTERRUPT (1 << 2)
552# define LB_D2_VLINE_INTERRUPT (1 << 3)
553# define LB_D1_VBLANK_INTERRUPT (1 << 4)
554# define LB_D2_VBLANK_INTERRUPT (1 << 5)
555# define DACA_AUTODETECT_INTERRUPT (1 << 16)
556# define DACB_AUTODETECT_INTERRUPT (1 << 17)
557# define DC_HOT_PLUG_DETECT1_INTERRUPT (1 << 18)
558# define DC_HOT_PLUG_DETECT2_INTERRUPT (1 << 19)
559# define DC_I2C_SW_DONE_INTERRUPT (1 << 20)
560# define DC_I2C_HW_DONE_INTERRUPT (1 << 21)
561#define DISP_INTERRUPT_STATUS_CONTINUE 0x7ee8
562#define DCE3_DISP_INTERRUPT_STATUS_CONTINUE 0x7de8
563# define DC_HPD4_INTERRUPT (1 << 14)
564# define DC_HPD4_RX_INTERRUPT (1 << 15)
565# define DC_HPD3_INTERRUPT (1 << 28)
566# define DC_HPD1_RX_INTERRUPT (1 << 29)
567# define DC_HPD2_RX_INTERRUPT (1 << 30)
568#define DCE3_DISP_INTERRUPT_STATUS_CONTINUE2 0x7dec
569# define DC_HPD3_RX_INTERRUPT (1 << 0)
570# define DIGA_DP_VID_STREAM_DISABLE_INTERRUPT (1 << 1)
571# define DIGA_DP_STEER_FIFO_OVERFLOW_INTERRUPT (1 << 2)
572# define DIGB_DP_VID_STREAM_DISABLE_INTERRUPT (1 << 3)
573# define DIGB_DP_STEER_FIFO_OVERFLOW_INTERRUPT (1 << 4)
574# define AUX1_SW_DONE_INTERRUPT (1 << 5)
575# define AUX1_LS_DONE_INTERRUPT (1 << 6)
576# define AUX2_SW_DONE_INTERRUPT (1 << 7)
577# define AUX2_LS_DONE_INTERRUPT (1 << 8)
578# define AUX3_SW_DONE_INTERRUPT (1 << 9)
579# define AUX3_LS_DONE_INTERRUPT (1 << 10)
580# define AUX4_SW_DONE_INTERRUPT (1 << 11)
581# define AUX4_LS_DONE_INTERRUPT (1 << 12)
582# define DIGA_DP_FAST_TRAINING_COMPLETE_INTERRUPT (1 << 13)
583# define DIGB_DP_FAST_TRAINING_COMPLETE_INTERRUPT (1 << 14)
584/* DCE 3.2 */
585# define AUX5_SW_DONE_INTERRUPT (1 << 15)
586# define AUX5_LS_DONE_INTERRUPT (1 << 16)
587# define AUX6_SW_DONE_INTERRUPT (1 << 17)
588# define AUX6_LS_DONE_INTERRUPT (1 << 18)
589# define DC_HPD5_INTERRUPT (1 << 19)
590# define DC_HPD5_RX_INTERRUPT (1 << 20)
591# define DC_HPD6_INTERRUPT (1 << 21)
592# define DC_HPD6_RX_INTERRUPT (1 << 22)
593
594#define DACA_AUTO_DETECT_CONTROL 0x7828
595#define DACB_AUTO_DETECT_CONTROL 0x7a28
596#define DCE3_DACA_AUTO_DETECT_CONTROL 0x7028
597#define DCE3_DACB_AUTO_DETECT_CONTROL 0x7128
598# define DACx_AUTODETECT_MODE(x) ((x) << 0)
599# define DACx_AUTODETECT_MODE_NONE 0
600# define DACx_AUTODETECT_MODE_CONNECT 1
601# define DACx_AUTODETECT_MODE_DISCONNECT 2
602# define DACx_AUTODETECT_FRAME_TIME_COUNTER(x) ((x) << 8)
603/* bit 18 = R/C, 17 = G/Y, 16 = B/Comp */
604# define DACx_AUTODETECT_CHECK_MASK(x) ((x) << 16)
605
606#define DCE3_DACA_AUTODETECT_INT_CONTROL 0x7038
607#define DCE3_DACB_AUTODETECT_INT_CONTROL 0x7138
608#define DACA_AUTODETECT_INT_CONTROL 0x7838
609#define DACB_AUTODETECT_INT_CONTROL 0x7a38
610# define DACx_AUTODETECT_ACK (1 << 0)
611# define DACx_AUTODETECT_INT_ENABLE (1 << 16)
612
613#define DC_HOT_PLUG_DETECT1_CONTROL 0x7d00
614#define DC_HOT_PLUG_DETECT2_CONTROL 0x7d10
615#define DC_HOT_PLUG_DETECT3_CONTROL 0x7d24
616# define DC_HOT_PLUG_DETECTx_EN (1 << 0)
617
618#define DC_HOT_PLUG_DETECT1_INT_STATUS 0x7d04
619#define DC_HOT_PLUG_DETECT2_INT_STATUS 0x7d14
620#define DC_HOT_PLUG_DETECT3_INT_STATUS 0x7d28
621# define DC_HOT_PLUG_DETECTx_INT_STATUS (1 << 0)
622# define DC_HOT_PLUG_DETECTx_SENSE (1 << 1)
623
624/* DCE 3.0 */
625#define DC_HPD1_INT_STATUS 0x7d00
626#define DC_HPD2_INT_STATUS 0x7d0c
627#define DC_HPD3_INT_STATUS 0x7d18
628#define DC_HPD4_INT_STATUS 0x7d24
629/* DCE 3.2 */
630#define DC_HPD5_INT_STATUS 0x7dc0
631#define DC_HPD6_INT_STATUS 0x7df4
632# define DC_HPDx_INT_STATUS (1 << 0)
633# define DC_HPDx_SENSE (1 << 1)
634# define DC_HPDx_RX_INT_STATUS (1 << 8)
635
636#define DC_HOT_PLUG_DETECT1_INT_CONTROL 0x7d08
637#define DC_HOT_PLUG_DETECT2_INT_CONTROL 0x7d18
638#define DC_HOT_PLUG_DETECT3_INT_CONTROL 0x7d2c
639# define DC_HOT_PLUG_DETECTx_INT_ACK (1 << 0)
640# define DC_HOT_PLUG_DETECTx_INT_POLARITY (1 << 8)
641# define DC_HOT_PLUG_DETECTx_INT_EN (1 << 16)
642/* DCE 3.0 */
643#define DC_HPD1_INT_CONTROL 0x7d04
644#define DC_HPD2_INT_CONTROL 0x7d10
645#define DC_HPD3_INT_CONTROL 0x7d1c
646#define DC_HPD4_INT_CONTROL 0x7d28
647/* DCE 3.2 */
648#define DC_HPD5_INT_CONTROL 0x7dc4
649#define DC_HPD6_INT_CONTROL 0x7df8
650# define DC_HPDx_INT_ACK (1 << 0)
651# define DC_HPDx_INT_POLARITY (1 << 8)
652# define DC_HPDx_INT_EN (1 << 16)
653# define DC_HPDx_RX_INT_ACK (1 << 20)
654# define DC_HPDx_RX_INT_EN (1 << 24)
655
656/* DCE 3.0 */
657#define DC_HPD1_CONTROL 0x7d08
658#define DC_HPD2_CONTROL 0x7d14
659#define DC_HPD3_CONTROL 0x7d20
660#define DC_HPD4_CONTROL 0x7d2c
661/* DCE 3.2 */
662#define DC_HPD5_CONTROL 0x7dc8
663#define DC_HPD6_CONTROL 0x7dfc
664# define DC_HPDx_CONNECTION_TIMER(x) ((x) << 0)
665# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16)
666/* DCE 3.2 */
667# define DC_HPDx_EN (1 << 28)
460 668
461/* 669/*
462 * PM4 670 * PM4
@@ -500,7 +708,6 @@
500#define PACKET3_WAIT_REG_MEM 0x3C 708#define PACKET3_WAIT_REG_MEM 0x3C
501#define PACKET3_MEM_WRITE 0x3D 709#define PACKET3_MEM_WRITE 0x3D
502#define PACKET3_INDIRECT_BUFFER 0x32 710#define PACKET3_INDIRECT_BUFFER 0x32
503#define PACKET3_CP_INTERRUPT 0x40
504#define PACKET3_SURFACE_SYNC 0x43 711#define PACKET3_SURFACE_SYNC 0x43
505# define PACKET3_CB0_DEST_BASE_ENA (1 << 6) 712# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
506# define PACKET3_TC_ACTION_ENA (1 << 23) 713# define PACKET3_TC_ACTION_ENA (1 << 23)
@@ -674,4 +881,5 @@
674#define S_000E60_SOFT_RESET_TSC(x) (((x) & 1) << 16) 881#define S_000E60_SOFT_RESET_TSC(x) (((x) & 1) << 16)
675#define S_000E60_SOFT_RESET_VMC(x) (((x) & 1) << 17) 882#define S_000E60_SOFT_RESET_VMC(x) (((x) & 1) << 17)
676 883
884#define R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480
677#endif 885#endif
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 224506a2f7b1..c938bb54123c 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -28,8 +28,6 @@
28#ifndef __RADEON_H__ 28#ifndef __RADEON_H__
29#define __RADEON_H__ 29#define __RADEON_H__
30 30
31#include "radeon_object.h"
32
33/* TODO: Here are things that needs to be done : 31/* TODO: Here are things that needs to be done :
34 * - surface allocator & initializer : (bit like scratch reg) should 32 * - surface allocator & initializer : (bit like scratch reg) should
35 * initialize HDP_ stuff on RS600, R600, R700 hw, well anythings 33 * initialize HDP_ stuff on RS600, R600, R700 hw, well anythings
@@ -67,6 +65,11 @@
67#include <linux/list.h> 65#include <linux/list.h>
68#include <linux/kref.h> 66#include <linux/kref.h>
69 67
68#include <ttm/ttm_bo_api.h>
69#include <ttm/ttm_bo_driver.h>
70#include <ttm/ttm_placement.h>
71#include <ttm/ttm_module.h>
72
70#include "radeon_family.h" 73#include "radeon_family.h"
71#include "radeon_mode.h" 74#include "radeon_mode.h"
72#include "radeon_reg.h" 75#include "radeon_reg.h"
@@ -85,6 +88,7 @@ extern int radeon_benchmarking;
85extern int radeon_testing; 88extern int radeon_testing;
86extern int radeon_connector_table; 89extern int radeon_connector_table;
87extern int radeon_tv; 90extern int radeon_tv;
91extern int radeon_new_pll;
88 92
89/* 93/*
90 * Copy from radeon_drv.h so we don't have to include both and have conflicting 94 * Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -186,76 +190,62 @@ void radeon_fence_unref(struct radeon_fence **fence);
186 * Tiling registers 190 * Tiling registers
187 */ 191 */
188struct radeon_surface_reg { 192struct radeon_surface_reg {
189 struct radeon_object *robj; 193 struct radeon_bo *bo;
190}; 194};
191 195
192#define RADEON_GEM_MAX_SURFACES 8 196#define RADEON_GEM_MAX_SURFACES 8
193 197
194/* 198/*
195 * Radeon buffer. 199 * TTM.
196 */ 200 */
197struct radeon_object; 201struct radeon_mman {
202 struct ttm_bo_global_ref bo_global_ref;
203 struct ttm_global_reference mem_global_ref;
204 bool mem_global_referenced;
205 struct ttm_bo_device bdev;
206};
207
208struct radeon_bo {
209 /* Protected by gem.mutex */
210 struct list_head list;
211 /* Protected by tbo.reserved */
212 u32 placements[3];
213 struct ttm_placement placement;
214 struct ttm_buffer_object tbo;
215 struct ttm_bo_kmap_obj kmap;
216 unsigned pin_count;
217 void *kptr;
218 u32 tiling_flags;
219 u32 pitch;
220 int surface_reg;
221 /* Constant after initialization */
222 struct radeon_device *rdev;
223 struct drm_gem_object *gobj;
224};
198 225
199struct radeon_object_list { 226struct radeon_bo_list {
200 struct list_head list; 227 struct list_head list;
201 struct radeon_object *robj; 228 struct radeon_bo *bo;
202 uint64_t gpu_offset; 229 uint64_t gpu_offset;
203 unsigned rdomain; 230 unsigned rdomain;
204 unsigned wdomain; 231 unsigned wdomain;
205 uint32_t tiling_flags; 232 u32 tiling_flags;
206}; 233};
207 234
208int radeon_object_init(struct radeon_device *rdev);
209void radeon_object_fini(struct radeon_device *rdev);
210int radeon_object_create(struct radeon_device *rdev,
211 struct drm_gem_object *gobj,
212 unsigned long size,
213 bool kernel,
214 uint32_t domain,
215 bool interruptible,
216 struct radeon_object **robj_ptr);
217int radeon_object_kmap(struct radeon_object *robj, void **ptr);
218void radeon_object_kunmap(struct radeon_object *robj);
219void radeon_object_unref(struct radeon_object **robj);
220int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
221 uint64_t *gpu_addr);
222void radeon_object_unpin(struct radeon_object *robj);
223int radeon_object_wait(struct radeon_object *robj);
224int radeon_object_busy_domain(struct radeon_object *robj, uint32_t *cur_placement);
225int radeon_object_evict_vram(struct radeon_device *rdev);
226int radeon_object_mmap(struct radeon_object *robj, uint64_t *offset);
227void radeon_object_force_delete(struct radeon_device *rdev);
228void radeon_object_list_add_object(struct radeon_object_list *lobj,
229 struct list_head *head);
230int radeon_object_list_validate(struct list_head *head, void *fence);
231void radeon_object_list_unvalidate(struct list_head *head);
232void radeon_object_list_clean(struct list_head *head);
233int radeon_object_fbdev_mmap(struct radeon_object *robj,
234 struct vm_area_struct *vma);
235unsigned long radeon_object_size(struct radeon_object *robj);
236void radeon_object_clear_surface_reg(struct radeon_object *robj);
237int radeon_object_check_tiling(struct radeon_object *robj, bool has_moved,
238 bool force_drop);
239void radeon_object_set_tiling_flags(struct radeon_object *robj,
240 uint32_t tiling_flags, uint32_t pitch);
241void radeon_object_get_tiling_flags(struct radeon_object *robj, uint32_t *tiling_flags, uint32_t *pitch);
242void radeon_bo_move_notify(struct ttm_buffer_object *bo,
243 struct ttm_mem_reg *mem);
244void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
245/* 235/*
246 * GEM objects. 236 * GEM objects.
247 */ 237 */
248struct radeon_gem { 238struct radeon_gem {
239 struct mutex mutex;
249 struct list_head objects; 240 struct list_head objects;
250}; 241};
251 242
252int radeon_gem_init(struct radeon_device *rdev); 243int radeon_gem_init(struct radeon_device *rdev);
253void radeon_gem_fini(struct radeon_device *rdev); 244void radeon_gem_fini(struct radeon_device *rdev);
254int radeon_gem_object_create(struct radeon_device *rdev, int size, 245int radeon_gem_object_create(struct radeon_device *rdev, int size,
255 int alignment, int initial_domain, 246 int alignment, int initial_domain,
256 bool discardable, bool kernel, 247 bool discardable, bool kernel,
257 bool interruptible, 248 struct drm_gem_object **obj);
258 struct drm_gem_object **obj);
259int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain, 249int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
260 uint64_t *gpu_addr); 250 uint64_t *gpu_addr);
261void radeon_gem_object_unpin(struct drm_gem_object *obj); 251void radeon_gem_object_unpin(struct drm_gem_object *obj);
@@ -271,7 +261,7 @@ struct radeon_gart_table_ram {
271}; 261};
272 262
273struct radeon_gart_table_vram { 263struct radeon_gart_table_vram {
274 struct radeon_object *robj; 264 struct radeon_bo *robj;
275 volatile uint32_t *ptr; 265 volatile uint32_t *ptr;
276}; 266};
277 267
@@ -352,11 +342,16 @@ struct radeon_irq {
 	bool		sw_int;
 	/* FIXME: use a define max crtc rather than hardcode it */
 	bool		crtc_vblank_int[2];
+	/* FIXME: use defines for max hpd/dacs */
+	bool		hpd[6];
+	spinlock_t	sw_lock;
+	int		sw_refcount;
 };
 
 int radeon_irq_kms_init(struct radeon_device *rdev);
 void radeon_irq_kms_fini(struct radeon_device *rdev);
-
+void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev);
+void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev);
 
 /*
  * CP & ring.
@@ -376,7 +371,7 @@ struct radeon_ib {
  */
 struct radeon_ib_pool {
 	struct mutex		mutex;
-	struct radeon_object	*robj;
+	struct radeon_bo	*robj;
 	struct list_head	scheduled_ibs;
 	struct radeon_ib	ibs[RADEON_IB_POOL_SIZE];
 	bool			ready;
@@ -384,7 +379,7 @@ struct radeon_ib_pool {
 };
 
 struct radeon_cp {
-	struct radeon_object	*ring_obj;
+	struct radeon_bo	*ring_obj;
 	volatile uint32_t	*ring;
 	unsigned		rptr;
 	unsigned		wptr;
@@ -399,8 +394,25 @@ struct radeon_cp {
 	bool			ready;
 };
 
+/*
+ * R6xx+ IH ring
+ */
+struct r600_ih {
+	struct radeon_bo	*ring_obj;
+	volatile uint32_t	*ring;
+	unsigned		rptr;
+	unsigned		wptr;
+	unsigned		wptr_old;
+	unsigned		ring_size;
+	uint64_t		gpu_addr;
+	uint32_t		align_mask;
+	uint32_t		ptr_mask;
+	spinlock_t		lock;
+	bool			enabled;
+};
+
 struct r600_blit {
-	struct radeon_object	*shader_obj;
+	struct radeon_bo	*shader_obj;
 	u64 shader_gpu_addr;
 	u32 vs_offset, ps_offset;
 	u32 state_offset;
@@ -430,8 +442,8 @@ void radeon_ring_fini(struct radeon_device *rdev);
  */
 struct radeon_cs_reloc {
 	struct drm_gem_object		*gobj;
-	struct radeon_object		*robj;
-	struct radeon_object_list	lobj;
+	struct radeon_bo		*robj;
+	struct radeon_bo_list		lobj;
 	uint32_t			handle;
 	uint32_t			flags;
 };
@@ -527,7 +539,7 @@ void radeon_agp_fini(struct radeon_device *rdev);
  * Writeback
  */
 struct radeon_wb {
-	struct radeon_object	*wb_obj;
+	struct radeon_bo	*wb_obj;
 	volatile uint32_t	*wb;
 	uint64_t		gpu_addr;
 };
@@ -639,6 +651,11 @@ struct radeon_asic {
 			       uint32_t offset, uint32_t obj_size);
 	int (*clear_surface_reg)(struct radeon_device *rdev, int reg);
 	void (*bandwidth_update)(struct radeon_device *rdev);
+	void (*hdp_flush)(struct radeon_device *rdev);
+	void (*hpd_init)(struct radeon_device *rdev);
+	void (*hpd_fini)(struct radeon_device *rdev);
+	bool (*hpd_sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+	void (*hpd_set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
 };
 
 /*
@@ -751,9 +768,9 @@ struct radeon_device {
 	uint8_t				*bios;
 	bool				is_atom_bios;
 	uint16_t			bios_header_start;
-	struct radeon_object		*stollen_vga_memory;
+	struct radeon_bo		*stollen_vga_memory;
 	struct fb_info			*fbdev_info;
-	struct radeon_object		*fbdev_robj;
+	struct radeon_bo		*fbdev_rbo;
 	struct radeon_framebuffer	*fbdev_rfb;
 	/* Register mmio */
 	resource_size_t			rmmio_base;
@@ -791,8 +808,12 @@ struct radeon_device {
 	struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES];
 	const struct firmware *me_fw;	/* all family ME firmware */
 	const struct firmware *pfp_fw;	/* r6/700 PFP firmware */
+	const struct firmware *rlc_fw;	/* r6/700 RLC firmware */
 	struct r600_blit r600_blit;
 	int msi_enabled; /* msi enabled */
+	struct r600_ih ih; /* r6/700 interrupt ring */
+	struct workqueue_struct *wq;
+	struct work_struct hotplug_work;
 };
 
 int radeon_device_init(struct radeon_device *rdev,
@@ -829,6 +850,10 @@ static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32
 	}
 }
 
+/*
+ * Cast helper
+ */
+#define to_radeon_fence(p) ((struct radeon_fence *)(p))
 
 /*
  * Registers read & write functions.
@@ -965,18 +990,24 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
 #define radeon_get_engine_clock(rdev) (rdev)->asic->get_engine_clock((rdev))
 #define radeon_set_engine_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e))
 #define radeon_get_memory_clock(rdev) (rdev)->asic->get_memory_clock((rdev))
-#define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e))
+#define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_memory_clock((rdev), (e))
 #define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->set_pcie_lanes((rdev), (l))
 #define radeon_set_clock_gating(rdev, e) (rdev)->asic->set_clock_gating((rdev), (e))
 #define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->set_surface_reg((rdev), (r), (f), (p), (o), (s)))
 #define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->clear_surface_reg((rdev), (r)))
 #define radeon_bandwidth_update(rdev) (rdev)->asic->bandwidth_update((rdev))
+#define radeon_hdp_flush(rdev) (rdev)->asic->hdp_flush((rdev))
+#define radeon_hpd_init(rdev) (rdev)->asic->hpd_init((rdev))
+#define radeon_hpd_fini(rdev) (rdev)->asic->hpd_fini((rdev))
+#define radeon_hpd_sense(rdev, hpd) (rdev)->asic->hpd_sense((rdev), (hpd))
+#define radeon_hpd_set_polarity(rdev, hpd) (rdev)->asic->hpd_set_polarity((rdev), (hpd))
 
 /* Common functions */
 extern int radeon_gart_table_vram_pin(struct radeon_device *rdev);
 extern int radeon_modeset_init(struct radeon_device *rdev);
 extern void radeon_modeset_fini(struct radeon_device *rdev);
 extern bool radeon_card_posted(struct radeon_device *rdev);
+extern bool radeon_boot_test_post_card(struct radeon_device *rdev);
 extern int radeon_clocks_init(struct radeon_device *rdev);
 extern void radeon_clocks_fini(struct radeon_device *rdev);
 extern void radeon_scratch_init(struct radeon_device *rdev);
@@ -984,6 +1015,7 @@ extern void radeon_surface_init(struct radeon_device *rdev);
 extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data);
 extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable);
 extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
+extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain);
 
 /* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */
 struct r100_mc_save {
@@ -1021,7 +1053,7 @@ extern int r100_cp_reset(struct radeon_device *rdev);
 extern void r100_vga_render_disable(struct radeon_device *rdev);
 extern int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
 						struct radeon_cs_packet *pkt,
-						struct radeon_object *robj);
+						struct radeon_bo *robj);
 extern int r100_cs_parse_packet0(struct radeon_cs_parser *p,
 				 struct radeon_cs_packet *pkt,
 				 const unsigned *auth, unsigned n,
@@ -1029,6 +1061,8 @@ extern int r100_cs_parse_packet0(struct radeon_cs_parser *p,
 extern int r100_cs_packet_parse(struct radeon_cs_parser *p,
 				struct radeon_cs_packet *pkt,
 				unsigned idx);
+extern void r100_enable_bm(struct radeon_device *rdev);
+extern void r100_set_common_regs(struct radeon_device *rdev);
 
 /* rv200,rv250,rv280 */
 extern void r200_set_safe_registers(struct radeon_device *rdev);
@@ -1104,7 +1138,14 @@ extern void r600_wb_disable(struct radeon_device *rdev);
 extern void r600_scratch_init(struct radeon_device *rdev);
 extern int r600_blit_init(struct radeon_device *rdev);
 extern void r600_blit_fini(struct radeon_device *rdev);
-extern int r600_cp_init_microcode(struct radeon_device *rdev);
+extern int r600_init_microcode(struct radeon_device *rdev);
 extern int r600_gpu_reset(struct radeon_device *rdev);
+/* r600 irq */
+extern int r600_irq_init(struct radeon_device *rdev);
+extern void r600_irq_fini(struct radeon_device *rdev);
+extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size);
+extern int r600_irq_set(struct radeon_device *rdev);
+
+#include "radeon_object.h"
 
 #endif
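Note on the radeon.h hunks above: struct radeon_irq grows a sw_lock/sw_refcount pair, and radeon_irq_kms_sw_irq_get()/radeon_irq_kms_sw_irq_put() are the matching refcounted enable/disable entry points, so the software interrupt can stay masked unless a fence waiter actually needs it. The implementation lives in radeon_irq_kms.c and is not part of these hunks; the following is only a sketch of the implied pattern, assuming the field layout shown above (radeon_irq_set() is the per-ASIC wrapper macro from this header):

void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev)
{
	unsigned long irqflags;

	spin_lock_irqsave(&rdev->irq.sw_lock, irqflags);
	if (rdev->irq.sw_refcount++ == 0) {
		/* first user enables the SW interrupt ... */
		rdev->irq.sw_int = true;
		radeon_irq_set(rdev);
	}
	spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags);
}

void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev)
{
	unsigned long irqflags;

	spin_lock_irqsave(&rdev->irq.sw_lock, irqflags);
	if (--rdev->irq.sw_refcount == 0) {
		/* ... and the last user disables it again */
		rdev->irq.sw_int = false;
		radeon_irq_set(rdev);
	}
	spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags);
}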
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index c18fbee387d7..636116bedcb4 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -76,6 +76,12 @@ int r100_clear_surface_reg(struct radeon_device *rdev, int reg);
 void r100_bandwidth_update(struct radeon_device *rdev);
 void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
 int r100_ring_test(struct radeon_device *rdev);
+void r100_hdp_flush(struct radeon_device *rdev);
+void r100_hpd_init(struct radeon_device *rdev);
+void r100_hpd_fini(struct radeon_device *rdev);
+bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+void r100_hpd_set_polarity(struct radeon_device *rdev,
+			   enum radeon_hpd_id hpd);
 
 static struct radeon_asic r100_asic = {
 	.init = &r100_init,
@@ -107,6 +113,11 @@ static struct radeon_asic r100_asic = {
 	.set_surface_reg = r100_set_surface_reg,
 	.clear_surface_reg = r100_clear_surface_reg,
 	.bandwidth_update = &r100_bandwidth_update,
+	.hdp_flush = &r100_hdp_flush,
+	.hpd_init = &r100_hpd_init,
+	.hpd_fini = &r100_hpd_fini,
+	.hpd_sense = &r100_hpd_sense,
+	.hpd_set_polarity = &r100_hpd_set_polarity,
 };
 
 
@@ -162,6 +173,11 @@ static struct radeon_asic r300_asic = {
 	.set_surface_reg = r100_set_surface_reg,
 	.clear_surface_reg = r100_clear_surface_reg,
 	.bandwidth_update = &r100_bandwidth_update,
+	.hdp_flush = &r100_hdp_flush,
+	.hpd_init = &r100_hpd_init,
+	.hpd_fini = &r100_hpd_fini,
+	.hpd_sense = &r100_hpd_sense,
+	.hpd_set_polarity = &r100_hpd_set_polarity,
 };
 
 /*
@@ -201,6 +217,11 @@ static struct radeon_asic r420_asic = {
 	.set_surface_reg = r100_set_surface_reg,
 	.clear_surface_reg = r100_clear_surface_reg,
 	.bandwidth_update = &r100_bandwidth_update,
+	.hdp_flush = &r100_hdp_flush,
+	.hpd_init = &r100_hpd_init,
+	.hpd_fini = &r100_hpd_fini,
+	.hpd_sense = &r100_hpd_sense,
+	.hpd_set_polarity = &r100_hpd_set_polarity,
 };
 
 
@@ -245,6 +266,11 @@ static struct radeon_asic rs400_asic = {
 	.set_surface_reg = r100_set_surface_reg,
 	.clear_surface_reg = r100_clear_surface_reg,
 	.bandwidth_update = &r100_bandwidth_update,
+	.hdp_flush = &r100_hdp_flush,
+	.hpd_init = &r100_hpd_init,
+	.hpd_fini = &r100_hpd_fini,
+	.hpd_sense = &r100_hpd_sense,
+	.hpd_set_polarity = &r100_hpd_set_polarity,
 };
 
 
@@ -263,6 +289,12 @@ int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
 uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
 void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 void rs600_bandwidth_update(struct radeon_device *rdev);
+void rs600_hpd_init(struct radeon_device *rdev);
+void rs600_hpd_fini(struct radeon_device *rdev);
+bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+void rs600_hpd_set_polarity(struct radeon_device *rdev,
+			    enum radeon_hpd_id hpd);
+
 static struct radeon_asic rs600_asic = {
 	.init = &rs600_init,
 	.fini = &rs600_fini,
@@ -291,6 +323,11 @@ static struct radeon_asic rs600_asic = {
 	.set_pcie_lanes = NULL,
 	.set_clock_gating = &radeon_atom_set_clock_gating,
 	.bandwidth_update = &rs600_bandwidth_update,
+	.hdp_flush = &r100_hdp_flush,
+	.hpd_init = &rs600_hpd_init,
+	.hpd_fini = &rs600_hpd_fini,
+	.hpd_sense = &rs600_hpd_sense,
+	.hpd_set_polarity = &rs600_hpd_set_polarity,
 };
 
 
@@ -334,6 +371,11 @@ static struct radeon_asic rs690_asic = {
 	.set_surface_reg = r100_set_surface_reg,
 	.clear_surface_reg = r100_clear_surface_reg,
 	.bandwidth_update = &rs690_bandwidth_update,
+	.hdp_flush = &r100_hdp_flush,
+	.hpd_init = &rs600_hpd_init,
+	.hpd_fini = &rs600_hpd_fini,
+	.hpd_sense = &rs600_hpd_sense,
+	.hpd_set_polarity = &rs600_hpd_set_polarity,
 };
 
 
@@ -381,6 +423,11 @@ static struct radeon_asic rv515_asic = {
 	.set_surface_reg = r100_set_surface_reg,
 	.clear_surface_reg = r100_clear_surface_reg,
 	.bandwidth_update = &rv515_bandwidth_update,
+	.hdp_flush = &r100_hdp_flush,
+	.hpd_init = &rs600_hpd_init,
+	.hpd_fini = &rs600_hpd_fini,
+	.hpd_sense = &rs600_hpd_sense,
+	.hpd_set_polarity = &rs600_hpd_set_polarity,
 };
 
 
@@ -419,6 +466,11 @@ static struct radeon_asic r520_asic = {
 	.set_surface_reg = r100_set_surface_reg,
 	.clear_surface_reg = r100_clear_surface_reg,
 	.bandwidth_update = &rv515_bandwidth_update,
+	.hdp_flush = &r100_hdp_flush,
+	.hpd_init = &rs600_hpd_init,
+	.hpd_fini = &rs600_hpd_fini,
+	.hpd_sense = &rs600_hpd_sense,
+	.hpd_set_polarity = &rs600_hpd_set_polarity,
 };
 
 /*
@@ -455,6 +507,12 @@ int r600_ring_test(struct radeon_device *rdev);
 int r600_copy_blit(struct radeon_device *rdev,
 		   uint64_t src_offset, uint64_t dst_offset,
 		   unsigned num_pages, struct radeon_fence *fence);
+void r600_hdp_flush(struct radeon_device *rdev);
+void r600_hpd_init(struct radeon_device *rdev);
+void r600_hpd_fini(struct radeon_device *rdev);
+bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+void r600_hpd_set_polarity(struct radeon_device *rdev,
+			   enum radeon_hpd_id hpd);
 
 static struct radeon_asic r600_asic = {
 	.init = &r600_init,
@@ -470,6 +528,7 @@ static struct radeon_asic r600_asic = {
 	.ring_ib_execute = &r600_ring_ib_execute,
 	.irq_set = &r600_irq_set,
 	.irq_process = &r600_irq_process,
+	.get_vblank_counter = &rs600_get_vblank_counter,
 	.fence_ring_emit = &r600_fence_ring_emit,
 	.cs_parse = &r600_cs_parse,
 	.copy_blit = &r600_copy_blit,
@@ -484,6 +543,11 @@ static struct radeon_asic r600_asic = {
 	.set_surface_reg = r600_set_surface_reg,
 	.clear_surface_reg = r600_clear_surface_reg,
 	.bandwidth_update = &rv515_bandwidth_update,
+	.hdp_flush = &r600_hdp_flush,
+	.hpd_init = &r600_hpd_init,
+	.hpd_fini = &r600_hpd_fini,
+	.hpd_sense = &r600_hpd_sense,
+	.hpd_set_polarity = &r600_hpd_set_polarity,
 };
 
 /*
@@ -509,6 +573,7 @@ static struct radeon_asic rv770_asic = {
 	.ring_ib_execute = &r600_ring_ib_execute,
 	.irq_set = &r600_irq_set,
 	.irq_process = &r600_irq_process,
+	.get_vblank_counter = &rs600_get_vblank_counter,
 	.fence_ring_emit = &r600_fence_ring_emit,
 	.cs_parse = &r600_cs_parse,
 	.copy_blit = &r600_copy_blit,
@@ -523,6 +588,11 @@ static struct radeon_asic rv770_asic = {
 	.set_surface_reg = r600_set_surface_reg,
 	.clear_surface_reg = r600_clear_surface_reg,
 	.bandwidth_update = &rv515_bandwidth_update,
+	.hdp_flush = &r600_hdp_flush,
+	.hpd_init = &r600_hpd_init,
+	.hpd_fini = &r600_hpd_fini,
+	.hpd_sense = &r600_hpd_sense,
+	.hpd_set_polarity = &r600_hpd_set_polarity,
 };
 
 #endif
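Every family table above fills in the same five hot-plug/HDP slots, mixing generation-appropriate helpers: pre-AVIVO parts reuse the r100_hpd_* set, AVIVO-class parts (rs600 through r520) the rs600_hpd_* set, and r6xx/r7xx their own r600_hpd_* set. Callers never pick a function directly; they go through the radeon_hpd_* wrapper macros added in radeon.h. A sketch of a consumer follows; the connector-side check is illustrative, not code from this series:

/* Illustrative only: poll one hot-plug pin through the per-ASIC hook. */
static bool radeon_hpd_connected(struct radeon_device *rdev,
				 enum radeon_hpd_id hpd)
{
	if (hpd == RADEON_HPD_NONE)
		return false;	/* connector has no HPD pin wired up */
	/* expands to rdev->asic->hpd_sense(rdev, hpd) */
	return radeon_hpd_sense(rdev, hpd);
}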
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 2ed88a820935..12a0c760e7ff 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -47,7 +47,8 @@ radeon_add_atom_connector(struct drm_device *dev,
 			  int connector_type,
 			  struct radeon_i2c_bus_rec *i2c_bus,
 			  bool linkb, uint32_t igp_lane_info,
-			  uint16_t connector_object_id);
+			  uint16_t connector_object_id,
+			  struct radeon_hpd *hpd);
 
 /* from radeon_legacy_encoder.c */
 extern void
@@ -60,16 +61,16 @@ union atom_supported_devices {
 	struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 info_2d1;
 };
 
-static inline struct radeon_i2c_bus_rec radeon_lookup_gpio(struct drm_device
-							   *dev, uint8_t id)
+static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rdev,
+							       uint8_t id)
 {
-	struct radeon_device *rdev = dev->dev_private;
 	struct atom_context *ctx = rdev->mode_info.atom_context;
-	ATOM_GPIO_I2C_ASSIGMENT gpio;
+	ATOM_GPIO_I2C_ASSIGMENT *gpio;
 	struct radeon_i2c_bus_rec i2c;
 	int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info);
 	struct _ATOM_GPIO_I2C_INFO *i2c_info;
 	uint16_t data_offset;
+	int i;
 
 	memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec));
 	i2c.valid = false;
@@ -78,34 +79,121 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_gpio(struct drm_device
 
 	i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);
 
-	gpio = i2c_info->asGPIO_Info[id];
-
-	i2c.mask_clk_reg = le16_to_cpu(gpio.usClkMaskRegisterIndex) * 4;
-	i2c.mask_data_reg = le16_to_cpu(gpio.usDataMaskRegisterIndex) * 4;
-	i2c.put_clk_reg = le16_to_cpu(gpio.usClkEnRegisterIndex) * 4;
-	i2c.put_data_reg = le16_to_cpu(gpio.usDataEnRegisterIndex) * 4;
-	i2c.get_clk_reg = le16_to_cpu(gpio.usClkY_RegisterIndex) * 4;
-	i2c.get_data_reg = le16_to_cpu(gpio.usDataY_RegisterIndex) * 4;
-	i2c.a_clk_reg = le16_to_cpu(gpio.usClkA_RegisterIndex) * 4;
-	i2c.a_data_reg = le16_to_cpu(gpio.usDataA_RegisterIndex) * 4;
-	i2c.mask_clk_mask = (1 << gpio.ucClkMaskShift);
-	i2c.mask_data_mask = (1 << gpio.ucDataMaskShift);
-	i2c.put_clk_mask = (1 << gpio.ucClkEnShift);
-	i2c.put_data_mask = (1 << gpio.ucDataEnShift);
-	i2c.get_clk_mask = (1 << gpio.ucClkY_Shift);
-	i2c.get_data_mask = (1 << gpio.ucDataY_Shift);
-	i2c.a_clk_mask = (1 << gpio.ucClkA_Shift);
-	i2c.a_data_mask = (1 << gpio.ucDataA_Shift);
-	i2c.valid = true;
+
+	for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
+		gpio = &i2c_info->asGPIO_Info[i];
+
+		if (gpio->sucI2cId.ucAccess == id) {
+			i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
+			i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
+			i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
+			i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
+			i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
+			i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
+			i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
+			i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
+			i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
+			i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
+			i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
+			i2c.en_data_mask = (1 << gpio->ucDataEnShift);
+			i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
+			i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
+			i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
+			i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
+
+			if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
+				i2c.hw_capable = true;
+			else
+				i2c.hw_capable = false;
+
+			if (gpio->sucI2cId.ucAccess == 0xa0)
+				i2c.mm_i2c = true;
+			else
+				i2c.mm_i2c = false;
+
+			i2c.i2c_id = gpio->sucI2cId.ucAccess;
+
+			i2c.valid = true;
+		}
+	}
 
 	return i2c;
 }
 
+static inline struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev,
+							u8 id)
+{
+	struct atom_context *ctx = rdev->mode_info.atom_context;
+	struct radeon_gpio_rec gpio;
+	int index = GetIndexIntoMasterTable(DATA, GPIO_Pin_LUT);
+	struct _ATOM_GPIO_PIN_LUT *gpio_info;
+	ATOM_GPIO_PIN_ASSIGNMENT *pin;
+	u16 data_offset, size;
+	int i, num_indices;
+
+	memset(&gpio, 0, sizeof(struct radeon_gpio_rec));
+	gpio.valid = false;
+
+	atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset);
+
+	gpio_info = (struct _ATOM_GPIO_PIN_LUT *)(ctx->bios + data_offset);
+
+	num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / sizeof(ATOM_GPIO_PIN_ASSIGNMENT);
+
+	for (i = 0; i < num_indices; i++) {
+		pin = &gpio_info->asGPIO_Pin[i];
+		if (id == pin->ucGPIO_ID) {
+			gpio.id = pin->ucGPIO_ID;
+			gpio.reg = pin->usGpioPin_AIndex * 4;
+			gpio.mask = (1 << pin->ucGpioPinBitShift);
+			gpio.valid = true;
+			break;
+		}
+	}
+
+	return gpio;
+}
+
+static struct radeon_hpd radeon_atom_get_hpd_info_from_gpio(struct radeon_device *rdev,
+							    struct radeon_gpio_rec *gpio)
+{
+	struct radeon_hpd hpd;
+	hpd.gpio = *gpio;
+	if (gpio->reg == AVIVO_DC_GPIO_HPD_A) {
+		switch(gpio->mask) {
+		case (1 << 0):
+			hpd.hpd = RADEON_HPD_1;
+			break;
+		case (1 << 8):
+			hpd.hpd = RADEON_HPD_2;
+			break;
+		case (1 << 16):
+			hpd.hpd = RADEON_HPD_3;
+			break;
+		case (1 << 24):
+			hpd.hpd = RADEON_HPD_4;
+			break;
+		case (1 << 26):
+			hpd.hpd = RADEON_HPD_5;
+			break;
+		case (1 << 28):
+			hpd.hpd = RADEON_HPD_6;
+			break;
+		default:
+			hpd.hpd = RADEON_HPD_NONE;
+			break;
+		}
+	} else
+		hpd.hpd = RADEON_HPD_NONE;
+	return hpd;
+}
+
 static bool radeon_atom_apply_quirks(struct drm_device *dev,
 				     uint32_t supported_device,
 				     int *connector_type,
 				     struct radeon_i2c_bus_rec *i2c_bus,
-				     uint16_t *line_mux)
+				     uint16_t *line_mux,
+				     struct radeon_hpd *hpd)
 {
 
 	/* Asus M2A-VM HDMI board lists the DVI port as HDMI */
@@ -135,6 +223,23 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
 		}
 	}
 
+	/* HIS X1300 is DVI+VGA, not DVI+DVI */
+	if ((dev->pdev->device == 0x7146) &&
+	    (dev->pdev->subsystem_vendor == 0x17af) &&
+	    (dev->pdev->subsystem_device == 0x2058)) {
+		if (supported_device == ATOM_DEVICE_DFP1_SUPPORT)
+			return false;
+	}
+
+	/* Gigabyte X1300 is DVI+VGA, not DVI+DVI */
+	if ((dev->pdev->device == 0x7142) &&
+	    (dev->pdev->subsystem_vendor == 0x1458) &&
+	    (dev->pdev->subsystem_device == 0x2134)) {
+		if (supported_device == ATOM_DEVICE_DFP1_SUPPORT)
+			return false;
+	}
+
+
 	/* Funky macbooks */
 	if ((dev->pdev->device == 0x71C5) &&
 	    (dev->pdev->subsystem_vendor == 0x106b) &&
@@ -172,6 +277,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
 		}
 	}
 
+	/* Acer laptop reports DVI-D as DVI-I */
+	if ((dev->pdev->device == 0x95c4) &&
+	    (dev->pdev->subsystem_vendor == 0x1025) &&
+	    (dev->pdev->subsystem_device == 0x013c)) {
+		if ((*connector_type == DRM_MODE_CONNECTOR_DVII) &&
+		    (supported_device == ATOM_DEVICE_DFP1_SUPPORT))
+			*connector_type = DRM_MODE_CONNECTOR_DVID;
+	}
+
 	return true;
 }
 
@@ -240,16 +354,18 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
 	struct radeon_mode_info *mode_info = &rdev->mode_info;
 	struct atom_context *ctx = mode_info->atom_context;
 	int index = GetIndexIntoMasterTable(DATA, Object_Header);
-	uint16_t size, data_offset;
-	uint8_t frev, crev, line_mux = 0;
+	u16 size, data_offset;
+	u8 frev, crev;
 	ATOM_CONNECTOR_OBJECT_TABLE *con_obj;
 	ATOM_DISPLAY_OBJECT_PATH_TABLE *path_obj;
 	ATOM_OBJECT_HEADER *obj_header;
 	int i, j, path_size, device_support;
 	int connector_type;
-	uint16_t igp_lane_info, conn_id, connector_object_id;
+	u16 igp_lane_info, conn_id, connector_object_id;
 	bool linkb;
 	struct radeon_i2c_bus_rec ddc_bus;
+	struct radeon_gpio_rec gpio;
+	struct radeon_hpd hpd;
 
 	atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
 
@@ -276,7 +392,6 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
 		path = (ATOM_DISPLAY_OBJECT_PATH *) addr;
 		path_size += le16_to_cpu(path->usSize);
 		linkb = false;
-
 		if (device_support & le16_to_cpu(path->usDeviceTag)) {
 			uint8_t con_obj_id, con_obj_num, con_obj_type;
 
@@ -377,10 +492,9 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
 			}
 		}
 
-			/* look up gpio for ddc */
+			/* look up gpio for ddc, hpd */
 			if ((le16_to_cpu(path->usDeviceTag) &
-			     (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
-			    == 0) {
+			     (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) == 0) {
 				for (j = 0; j < con_obj->ucNumberOfObjects; j++) {
 					if (le16_to_cpu(path->usConnObjectId) ==
 					    le16_to_cpu(con_obj->asObjects[j].
@@ -394,21 +508,34 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
 						    asObjects[j].
 						    usRecordOffset));
 						ATOM_I2C_RECORD *i2c_record;
+						ATOM_HPD_INT_RECORD *hpd_record;
+						ATOM_I2C_ID_CONFIG_ACCESS *i2c_config;
+						hpd.hpd = RADEON_HPD_NONE;
 
 						while (record->ucRecordType > 0
 						       && record->
 						       ucRecordType <=
 						       ATOM_MAX_OBJECT_RECORD_NUMBER) {
-							switch (record->
-								ucRecordType) {
+							switch (record->ucRecordType) {
 							case ATOM_I2C_RECORD_TYPE:
 								i2c_record =
-								    (ATOM_I2C_RECORD
-								     *) record;
-								line_mux =
-								    i2c_record->
-								    sucI2cId.
-								    bfI2C_LineMux;
+								    (ATOM_I2C_RECORD *)
+								    record;
+								i2c_config =
+								    (ATOM_I2C_ID_CONFIG_ACCESS *)
+								    &i2c_record->sucI2cId;
+								ddc_bus = radeon_lookup_i2c_gpio(rdev,
+												 i2c_config->
+												 ucAccess);
+								break;
+							case ATOM_HPD_INT_RECORD_TYPE:
+								hpd_record =
+								    (ATOM_HPD_INT_RECORD *)
+								    record;
+								gpio = radeon_lookup_gpio(rdev,
+											  hpd_record->ucHPDIntGPIOID);
+								hpd = radeon_atom_get_hpd_info_from_gpio(rdev, &gpio);
+								hpd.plugged_state = hpd_record->ucPlugged_PinState;
 								break;
 							}
 							record =
@@ -421,24 +548,16 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
 							break;
 						}
 					}
-			} else
-				line_mux = 0;
-
-			if ((le16_to_cpu(path->usDeviceTag) ==
-			     ATOM_DEVICE_TV1_SUPPORT)
-			    || (le16_to_cpu(path->usDeviceTag) ==
-				ATOM_DEVICE_TV2_SUPPORT)
-			    || (le16_to_cpu(path->usDeviceTag) ==
-				ATOM_DEVICE_CV_SUPPORT))
-				ddc_bus.valid = false;
-			else
-				ddc_bus = radeon_lookup_gpio(dev, line_mux);
+			} else {
+				hpd.hpd = RADEON_HPD_NONE;
+				ddc_bus.valid = false;
+			}
 
 			conn_id = le16_to_cpu(path->usConnObjectId);
 
 			if (!radeon_atom_apply_quirks
 			    (dev, le16_to_cpu(path->usDeviceTag), &connector_type,
-			     &ddc_bus, &conn_id))
+			     &ddc_bus, &conn_id, &hpd))
 				continue;
 
 			radeon_add_atom_connector(dev,
@@ -447,7 +566,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
 						  usDeviceTag),
 						  connector_type, &ddc_bus,
 						  linkb, igp_lane_info,
-						  connector_object_id);
+						  connector_object_id,
+						  &hpd);
 
 		}
 	}
@@ -502,6 +622,7 @@ struct bios_connector {
 	uint16_t devices;
 	int connector_type;
 	struct radeon_i2c_bus_rec ddc_bus;
+	struct radeon_hpd hpd;
 };
 
 bool radeon_get_atom_connector_info_from_supported_devices_table(struct
@@ -517,7 +638,7 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
 	uint16_t device_support;
 	uint8_t dac;
 	union atom_supported_devices *supported_devices;
-	int i, j;
+	int i, j, max_device;
 	struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
 
 	atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
@@ -527,7 +648,12 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
 
 	device_support = le16_to_cpu(supported_devices->info.usDeviceSupport);
 
-	for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
+	if (frev > 1)
+		max_device = ATOM_MAX_SUPPORTED_DEVICE;
+	else
+		max_device = ATOM_MAX_SUPPORTED_DEVICE_INFO;
+
+	for (i = 0; i < max_device; i++) {
 		ATOM_CONNECTOR_INFO_I2C ci =
 		    supported_devices->info.asConnInfo[i];
 
@@ -553,22 +679,8 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
 
 		dac = ci.sucConnectorInfo.sbfAccess.bfAssociatedDAC;
 
-		if ((rdev->family == CHIP_RS690) ||
-		    (rdev->family == CHIP_RS740)) {
-			if ((i == ATOM_DEVICE_DFP2_INDEX)
-			    && (ci.sucI2cId.sbfAccess.bfI2C_LineMux == 2))
-				bios_connectors[i].line_mux =
-				    ci.sucI2cId.sbfAccess.bfI2C_LineMux + 1;
-			else if ((i == ATOM_DEVICE_DFP3_INDEX)
-				 && (ci.sucI2cId.sbfAccess.bfI2C_LineMux == 1))
-				bios_connectors[i].line_mux =
-				    ci.sucI2cId.sbfAccess.bfI2C_LineMux + 1;
-			else
-				bios_connectors[i].line_mux =
-				    ci.sucI2cId.sbfAccess.bfI2C_LineMux;
-		} else
-			bios_connectors[i].line_mux =
-			    ci.sucI2cId.sbfAccess.bfI2C_LineMux;
+		bios_connectors[i].line_mux =
+			ci.sucI2cId.ucAccess;
 
 		/* give tv unique connector ids */
 		if (i == ATOM_DEVICE_TV1_INDEX) {
@@ -582,8 +694,30 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
 			bios_connectors[i].line_mux = 52;
 		} else
 			bios_connectors[i].ddc_bus =
-			    radeon_lookup_gpio(dev,
+			    radeon_lookup_i2c_gpio(rdev,
 					       bios_connectors[i].line_mux);
+
+		if ((crev > 1) && (frev > 1)) {
+			u8 isb = supported_devices->info_2d1.asIntSrcInfo[i].ucIntSrcBitmap;
+			switch (isb) {
+			case 0x4:
+				bios_connectors[i].hpd.hpd = RADEON_HPD_1;
+				break;
+			case 0xa:
+				bios_connectors[i].hpd.hpd = RADEON_HPD_2;
+				break;
+			default:
+				bios_connectors[i].hpd.hpd = RADEON_HPD_NONE;
+				break;
+			}
+		} else {
+			if (i == ATOM_DEVICE_DFP1_INDEX)
+				bios_connectors[i].hpd.hpd = RADEON_HPD_1;
+			else if (i == ATOM_DEVICE_DFP2_INDEX)
+				bios_connectors[i].hpd.hpd = RADEON_HPD_2;
+			else
+				bios_connectors[i].hpd.hpd = RADEON_HPD_NONE;
+		}
 
 		/* Always set the connector type to VGA for CRT1/CRT2. if they are
 		 * shared with a DVI port, we'll pick up the DVI connector when we
@@ -595,7 +729,8 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
 
 		if (!radeon_atom_apply_quirks
 		    (dev, (1 << i), &bios_connectors[i].connector_type,
-		     &bios_connectors[i].ddc_bus, &bios_connectors[i].line_mux))
+		     &bios_connectors[i].ddc_bus, &bios_connectors[i].line_mux,
+		     &bios_connectors[i].hpd))
 			continue;
 
 		bios_connectors[i].valid = true;
@@ -617,9 +752,9 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
 	}
 
 	/* combine shared connectors */
-	for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
+	for (i = 0; i < max_device; i++) {
 		if (bios_connectors[i].valid) {
-			for (j = 0; j < ATOM_MAX_SUPPORTED_DEVICE; j++) {
+			for (j = 0; j < max_device; j++) {
 				if (bios_connectors[j].valid && (i != j)) {
 					if (bios_connectors[i].line_mux ==
 					    bios_connectors[j].line_mux) {
@@ -643,6 +778,10 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
 						bios_connectors[i].
 						    connector_type =
 						    DRM_MODE_CONNECTOR_DVII;
+						if (bios_connectors[j].devices &
+						    (ATOM_DEVICE_DFP_SUPPORT))
+							bios_connectors[i].hpd =
+								bios_connectors[j].hpd;
 						bios_connectors[j].
 						    valid = false;
 					}
@@ -653,7 +792,7 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
 	}
 
 	/* add the connectors */
-	for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
+	for (i = 0; i < max_device; i++) {
 		if (bios_connectors[i].valid) {
 			uint16_t connector_object_id =
 				atombios_get_connector_object_id(dev,
@@ -666,7 +805,8 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
 						  connector_type,
 						  &bios_connectors[i].ddc_bus,
 						  false, 0,
-						  connector_object_id);
+						  connector_object_id,
+						  &bios_connectors[i].hpd);
 		}
 	}
 
@@ -731,7 +871,8 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
 		 * pre-DCE 3.0 r6xx hardware. This might need to be adjusted per
 		 * family.
 		 */
-		p1pll->pll_out_min = 64800;
+		if (!radeon_new_pll)
+			p1pll->pll_out_min = 64800;
 	}
 
 	p1pll->pll_in_min =
@@ -861,6 +1002,7 @@ static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct
 	struct _ATOM_SPREAD_SPECTRUM_INFO *ss_info;
 	uint8_t frev, crev;
 	struct radeon_atom_ss *ss = NULL;
+	int i;
 
 	if (id > ATOM_MAX_SS_ENTRY)
 		return NULL;
@@ -878,12 +1020,17 @@ static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct
 	if (!ss)
 		return NULL;
 
-	ss->percentage = le16_to_cpu(ss_info->asSS_Info[id].usSpreadSpectrumPercentage);
-	ss->type = ss_info->asSS_Info[id].ucSpreadSpectrumType;
-	ss->step = ss_info->asSS_Info[id].ucSS_Step;
-	ss->delay = ss_info->asSS_Info[id].ucSS_Delay;
-	ss->range = ss_info->asSS_Info[id].ucSS_Range;
-	ss->refdiv = ss_info->asSS_Info[id].ucRecommendedRef_Div;
+	for (i = 0; i < ATOM_MAX_SS_ENTRY; i++) {
+		if (ss_info->asSS_Info[i].ucSS_Id == id) {
+			ss->percentage =
+				le16_to_cpu(ss_info->asSS_Info[i].usSpreadSpectrumPercentage);
+			ss->type = ss_info->asSS_Info[i].ucSpreadSpectrumType;
+			ss->step = ss_info->asSS_Info[i].ucSS_Step;
+			ss->delay = ss_info->asSS_Info[i].ucSS_Delay;
+			ss->range = ss_info->asSS_Info[i].ucSS_Range;
+			ss->refdiv = ss_info->asSS_Info[i].ucRecommendedRef_Div;
+		}
+	}
 	}
 	return ss;
 }
@@ -901,7 +1048,7 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
 	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_mode_info *mode_info = &rdev->mode_info;
 	int index = GetIndexIntoMasterTable(DATA, LVDS_Info);
-	uint16_t data_offset;
+	uint16_t data_offset, misc;
 	union lvds_info *lvds_info;
 	uint8_t frev, crev;
 	struct radeon_encoder_atom_dig *lvds = NULL;
@@ -940,6 +1087,19 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
 		lvds->panel_pwr_delay =
 		    le16_to_cpu(lvds_info->info.usOffDelayInMs);
 		lvds->lvds_misc = lvds_info->info.ucLVDS_Misc;
+
+		misc = le16_to_cpu(lvds_info->info.sLCDTiming.susModeMiscInfo.usAccess);
+		if (misc & ATOM_VSYNC_POLARITY)
+			lvds->native_mode.flags |= DRM_MODE_FLAG_NVSYNC;
+		if (misc & ATOM_HSYNC_POLARITY)
+			lvds->native_mode.flags |= DRM_MODE_FLAG_NHSYNC;
+		if (misc & ATOM_COMPOSITESYNC)
+			lvds->native_mode.flags |= DRM_MODE_FLAG_CSYNC;
+		if (misc & ATOM_INTERLACE)
+			lvds->native_mode.flags |= DRM_MODE_FLAG_INTERLACE;
+		if (misc & ATOM_DOUBLE_CLOCK_MODE)
+			lvds->native_mode.flags |= DRM_MODE_FLAG_DBLSCAN;
+
 		/* set crtc values */
 		drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V);
 
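The main pattern change in radeon_atombios.c above: table entries are now matched by the id they carry instead of being indexed by it. radeon_lookup_i2c_gpio() scans asGPIO_Info[] for a matching sucI2cId.ucAccess, radeon_lookup_gpio() scans the GPIO pin LUT for ucGPIO_ID, and radeon_atombios_get_ss_info() scans for ucSS_Id, since none of these ids are guaranteed to be dense array indices on every BIOS. Reduced to its core, the lookup looks like this (a sketch only, using the types visible in the hunks; find_i2c_gpio is not a function from this series):

/* Sketch: match the entry's id rather than trusting id as an index. */
static ATOM_GPIO_I2C_ASSIGMENT *find_i2c_gpio(struct _ATOM_GPIO_I2C_INFO *i2c_info,
					      uint8_t id)
{
	int i;

	for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
		ATOM_GPIO_I2C_ASSIGMENT *gpio = &i2c_info->asGPIO_Info[i];

		if (gpio->sucI2cId.ucAccess == id)
			return gpio;
	}
	return NULL;	/* id not present in this BIOS image */
}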
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index 10bd50a7db87..4ddfd4b5bc51 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -29,8 +29,8 @@
 void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
 			   unsigned sdomain, unsigned ddomain)
 {
-	struct radeon_object *dobj = NULL;
-	struct radeon_object *sobj = NULL;
+	struct radeon_bo *dobj = NULL;
+	struct radeon_bo *sobj = NULL;
 	struct radeon_fence *fence = NULL;
 	uint64_t saddr, daddr;
 	unsigned long start_jiffies;
@@ -41,19 +41,27 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
 
 	size = bsize;
 	n = 1024;
-	r = radeon_object_create(rdev, NULL, size, true, sdomain, false, &sobj);
+	r = radeon_bo_create(rdev, NULL, size, true, sdomain, &sobj);
 	if (r) {
 		goto out_cleanup;
 	}
-	r = radeon_object_pin(sobj, sdomain, &saddr);
+	r = radeon_bo_reserve(sobj, false);
+	if (unlikely(r != 0))
+		goto out_cleanup;
+	r = radeon_bo_pin(sobj, sdomain, &saddr);
+	radeon_bo_unreserve(sobj);
 	if (r) {
 		goto out_cleanup;
 	}
-	r = radeon_object_create(rdev, NULL, size, true, ddomain, false, &dobj);
+	r = radeon_bo_create(rdev, NULL, size, true, ddomain, &dobj);
 	if (r) {
 		goto out_cleanup;
 	}
-	r = radeon_object_pin(dobj, ddomain, &daddr);
+	r = radeon_bo_reserve(dobj, false);
+	if (unlikely(r != 0))
+		goto out_cleanup;
+	r = radeon_bo_pin(dobj, ddomain, &daddr);
+	radeon_bo_unreserve(dobj);
 	if (r) {
 		goto out_cleanup;
 	}
@@ -109,12 +117,20 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
 	}
 out_cleanup:
 	if (sobj) {
-		radeon_object_unpin(sobj);
-		radeon_object_unref(&sobj);
+		r = radeon_bo_reserve(sobj, false);
+		if (likely(r == 0)) {
+			radeon_bo_unpin(sobj);
+			radeon_bo_unreserve(sobj);
+		}
+		radeon_bo_unref(&sobj);
 	}
 	if (dobj) {
-		radeon_object_unpin(dobj);
-		radeon_object_unref(&dobj);
+		r = radeon_bo_reserve(dobj, false);
+		if (likely(r == 0)) {
+			radeon_bo_unpin(dobj);
+			radeon_bo_unreserve(dobj);
+		}
+		radeon_bo_unref(&dobj);
 	}
 	if (fence) {
 		radeon_fence_unref(&fence);
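The benchmark changes above show the new TTM validation protocol in miniature: a radeon_bo must be reserved before it can be pinned or unpinned, and the reservation is dropped once the pin state has been updated, while the pin itself persists. A hypothetical helper condensing the sequence used above (pin_bo is not a function from this series):

static int pin_bo(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
	int r;

	r = radeon_bo_reserve(bo, false);	/* may fail if interrupted */
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(bo, domain, gpu_addr);
	radeon_bo_unreserve(bo);		/* the pin outlives the reservation */
	return r;
}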
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
index a81354167621..b062109efbee 100644
--- a/drivers/gpu/drm/radeon/radeon_clocks.c
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -44,6 +44,10 @@ uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev)
 
 	ref_div =
 	    RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) & RADEON_M_SPLL_REF_DIV_MASK;
+
+	if (ref_div == 0)
+		return 0;
+
 	sclk = fb_div / ref_div;
 
 	post_div = RREG32_PLL(RADEON_SCLK_CNTL) & RADEON_SCLK_SRC_SEL_MASK;
@@ -70,6 +74,10 @@ static uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev)
 
 	ref_div =
 	    RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) & RADEON_M_SPLL_REF_DIV_MASK;
+
+	if (ref_div == 0)
+		return 0;
+
 	mclk = fb_div / ref_div;
 
 	post_div = RREG32_PLL(RADEON_MCLK_CNTL) & 0x7;
@@ -98,8 +106,19 @@ void radeon_get_clock_info(struct drm_device *dev)
 		ret = radeon_combios_get_clock_info(dev);
 
 	if (ret) {
-		if (p1pll->reference_div < 2)
-			p1pll->reference_div = 12;
+		if (p1pll->reference_div < 2) {
+			if (!ASIC_IS_AVIVO(rdev)) {
+				u32 tmp = RREG32_PLL(RADEON_PPLL_REF_DIV);
+				if (ASIC_IS_R300(rdev))
+					p1pll->reference_div =
+						(tmp & R300_PPLL_REF_DIV_ACC_MASK) >> R300_PPLL_REF_DIV_ACC_SHIFT;
+				else
+					p1pll->reference_div = tmp & RADEON_PPLL_REF_DIV_MASK;
+				if (p1pll->reference_div < 2)
+					p1pll->reference_div = 12;
+			} else
+				p1pll->reference_div = 12;
+		}
 		if (p2pll->reference_div < 2)
 			p2pll->reference_div = 12;
 		if (rdev->family < CHIP_RS600) {
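The two radeon_legacy_get_*_clock() guards above close the same hole: a PLL register that reads back zero (unposted card or broken BIOS) would otherwise feed a zero divisor into fb_div / ref_div. In compressed form (sketch only; safe_pll_clock is not a function from this series):

static uint32_t safe_pll_clock(uint32_t fb_div, uint32_t ref_div)
{
	if (ref_div == 0)
		return 0;	/* report "unknown" instead of dividing by zero */
	return fb_div / ref_div;
}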
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 5253cbf6db1f..c5021a3445de 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -50,7 +50,8 @@ radeon_add_legacy_connector(struct drm_device *dev,
 			    uint32_t supported_device,
 			    int connector_type,
 			    struct radeon_i2c_bus_rec *i2c_bus,
-			    uint16_t connector_object_id);
+			    uint16_t connector_object_id,
+			    struct radeon_hpd *hpd);
 
 /* from radeon_legacy_encoder.c */
 extern void
@@ -442,38 +443,70 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,
 
 }
 
-struct radeon_i2c_bus_rec combios_setup_i2c_bus(int ddc_line)
+static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rdev,
+						       int ddc_line)
 {
 	struct radeon_i2c_bus_rec i2c;
 
-	i2c.mask_clk_mask = RADEON_GPIO_EN_1;
-	i2c.mask_data_mask = RADEON_GPIO_EN_0;
-	i2c.a_clk_mask = RADEON_GPIO_A_1;
-	i2c.a_data_mask = RADEON_GPIO_A_0;
-	i2c.put_clk_mask = RADEON_GPIO_EN_1;
-	i2c.put_data_mask = RADEON_GPIO_EN_0;
-	i2c.get_clk_mask = RADEON_GPIO_Y_1;
-	i2c.get_data_mask = RADEON_GPIO_Y_0;
-	if ((ddc_line == RADEON_LCD_GPIO_MASK) ||
-	    (ddc_line == RADEON_MDGPIO_EN_REG)) {
-		i2c.mask_clk_reg = ddc_line;
-		i2c.mask_data_reg = ddc_line;
-		i2c.a_clk_reg = ddc_line;
-		i2c.a_data_reg = ddc_line;
-		i2c.put_clk_reg = ddc_line;
-		i2c.put_data_reg = ddc_line;
-		i2c.get_clk_reg = ddc_line + 4;
-		i2c.get_data_reg = ddc_line + 4;
+	if (ddc_line == RADEON_GPIOPAD_MASK) {
+		i2c.mask_clk_reg = RADEON_GPIOPAD_MASK;
+		i2c.mask_data_reg = RADEON_GPIOPAD_MASK;
+		i2c.a_clk_reg = RADEON_GPIOPAD_A;
+		i2c.a_data_reg = RADEON_GPIOPAD_A;
+		i2c.en_clk_reg = RADEON_GPIOPAD_EN;
+		i2c.en_data_reg = RADEON_GPIOPAD_EN;
+		i2c.y_clk_reg = RADEON_GPIOPAD_Y;
+		i2c.y_data_reg = RADEON_GPIOPAD_Y;
+	} else if (ddc_line == RADEON_MDGPIO_MASK) {
+		i2c.mask_clk_reg = RADEON_MDGPIO_MASK;
+		i2c.mask_data_reg = RADEON_MDGPIO_MASK;
+		i2c.a_clk_reg = RADEON_MDGPIO_A;
+		i2c.a_data_reg = RADEON_MDGPIO_A;
+		i2c.en_clk_reg = RADEON_MDGPIO_EN;
+		i2c.en_data_reg = RADEON_MDGPIO_EN;
+		i2c.y_clk_reg = RADEON_MDGPIO_Y;
+		i2c.y_data_reg = RADEON_MDGPIO_Y;
 	} else {
+		i2c.mask_clk_mask = RADEON_GPIO_EN_1;
+		i2c.mask_data_mask = RADEON_GPIO_EN_0;
+		i2c.a_clk_mask = RADEON_GPIO_A_1;
+		i2c.a_data_mask = RADEON_GPIO_A_0;
+		i2c.en_clk_mask = RADEON_GPIO_EN_1;
+		i2c.en_data_mask = RADEON_GPIO_EN_0;
+		i2c.y_clk_mask = RADEON_GPIO_Y_1;
+		i2c.y_data_mask = RADEON_GPIO_Y_0;
+
 		i2c.mask_clk_reg = ddc_line;
 		i2c.mask_data_reg = ddc_line;
 		i2c.a_clk_reg = ddc_line;
 		i2c.a_data_reg = ddc_line;
-		i2c.put_clk_reg = ddc_line;
-		i2c.put_data_reg = ddc_line;
-		i2c.get_clk_reg = ddc_line;
-		i2c.get_data_reg = ddc_line;
+		i2c.en_clk_reg = ddc_line;
+		i2c.en_data_reg = ddc_line;
+		i2c.y_clk_reg = ddc_line;
+		i2c.y_data_reg = ddc_line;
+	}
+
+	if (rdev->family < CHIP_R200)
+		i2c.hw_capable = false;
+	else {
+		switch (ddc_line) {
+		case RADEON_GPIO_VGA_DDC:
+		case RADEON_GPIO_DVI_DDC:
+			i2c.hw_capable = true;
+			break;
+		case RADEON_GPIO_MONID:
+			/* hw i2c on RADEON_GPIO_MONID doesn't seem to work
+			 * reliably on some pre-r4xx hardware; not sure why.
+			 */
+			i2c.hw_capable = false;
+			break;
+		default:
+			i2c.hw_capable = false;
+			break;
+		}
 	}
+	i2c.mm_i2c = false;
+	i2c.i2c_id = 0;
 
 	if (ddc_line)
 		i2c.valid = true;
@@ -495,7 +528,7 @@ bool radeon_combios_get_clock_info(struct drm_device *dev)
 	uint16_t sclk, mclk;
 
 	if (rdev->bios == NULL)
-		return NULL;
+		return false;
 
 	pll_info = combios_get_table_offset(dev, COMBIOS_PLL_INFO_TABLE);
 	if (pll_info) {
@@ -993,8 +1026,8 @@ static const struct radeon_tmds_pll default_tmds_pll[CHIP_LAST][4] = {
 	{{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}},	/* CHIP_R420 */
 	{{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}},	/* CHIP_R423 */
 	{{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}},	/* CHIP_RV410 */
-	{{15000, 0xb0155}, {0xffffffff, 0xb01cb}, {0, 0}, {0, 0}},	/* CHIP_RS400 */
-	{{15000, 0xb0155}, {0xffffffff, 0xb01cb}, {0, 0}, {0, 0}},	/* CHIP_RS480 */
+	{ {0, 0}, {0, 0}, {0, 0}, {0, 0} },	/* CHIP_RS400 */
+	{ {0, 0}, {0, 0}, {0, 0}, {0, 0} },	/* CHIP_RS480 */
 };
 
 bool radeon_legacy_get_tmds_info_from_table(struct radeon_encoder *encoder,
@@ -1028,7 +1061,6 @@ bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder,
 	tmds_info = combios_get_table_offset(dev, COMBIOS_DFP_INFO_TABLE);
 
 	if (tmds_info) {
-
 		ver = RBIOS8(tmds_info);
 		DRM_INFO("DFP table revision: %d\n", ver);
 		if (ver == 3) {
@@ -1063,51 +1095,139 @@ bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder,
 					   tmds->tmds_pll[i].value);
 			}
 		}
-	} else
+	} else {
 		DRM_INFO("No TMDS info found in BIOS\n");
+		return false;
+	}
 	return true;
 }
 
-struct radeon_encoder_int_tmds *radeon_combios_get_tmds_info(struct radeon_encoder *encoder)
+bool radeon_legacy_get_ext_tmds_info_from_table(struct radeon_encoder *encoder,
+						struct radeon_encoder_ext_tmds *tmds)
 {
-	struct radeon_encoder_int_tmds *tmds = NULL;
-	bool ret;
+	struct drm_device *dev = encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_i2c_bus_rec i2c_bus;
 
-	tmds = kzalloc(sizeof(struct radeon_encoder_int_tmds), GFP_KERNEL);
+	/* default for macs */
+	i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID);
+	tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
 
-	if (!tmds)
-		return NULL;
-
-	ret = radeon_legacy_get_tmds_info_from_combios(encoder, tmds);
-	if (ret == false)
-		radeon_legacy_get_tmds_info_from_table(encoder, tmds);
+	/* XXX some macs have duallink chips */
+	switch (rdev->mode_info.connector_table) {
+	case CT_POWERBOOK_EXTERNAL:
+	case CT_MINI_EXTERNAL:
+	default:
+		tmds->dvo_chip = DVO_SIL164;
+		tmds->slave_addr = 0x70 >> 1; /* 7 bit addressing */
+		break;
+	}
 
-	return tmds;
+	return true;
 }
 
-void radeon_combios_get_ext_tmds_info(struct radeon_encoder *encoder)
+bool radeon_legacy_get_ext_tmds_info_from_combios(struct radeon_encoder *encoder,
+						  struct radeon_encoder_ext_tmds *tmds)
 {
 	struct drm_device *dev = encoder->base.dev;
 	struct radeon_device *rdev = dev->dev_private;
-	uint16_t ext_tmds_info;
-	uint8_t ver;
+	uint16_t offset;
+	uint8_t ver, id, blocks, clk, data;
+	int i;
+	enum radeon_combios_ddc gpio;
+	struct radeon_i2c_bus_rec i2c_bus;
 
 	if (rdev->bios == NULL)
-		return;
+		return false;
 
-	ext_tmds_info =
-	    combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE);
-	if (ext_tmds_info) {
-		ver = RBIOS8(ext_tmds_info);
-		DRM_INFO("External TMDS Table revision: %d\n", ver);
-		// TODO
+	tmds->i2c_bus = NULL;
+	if (rdev->flags & RADEON_IS_IGP) {
+		offset = combios_get_table_offset(dev, COMBIOS_I2C_INFO_TABLE);
+		if (offset) {
+			ver = RBIOS8(offset);
+			DRM_INFO("GPIO Table revision: %d\n", ver);
+			blocks = RBIOS8(offset + 2);
+			for (i = 0; i < blocks; i++) {
+				id = RBIOS8(offset + 3 + (i * 5) + 0);
+				if (id == 136) {
+					clk = RBIOS8(offset + 3 + (i * 5) + 3);
+					data = RBIOS8(offset + 3 + (i * 5) + 4);
+					i2c_bus.valid = true;
+					i2c_bus.mask_clk_mask = (1 << clk);
+					i2c_bus.mask_data_mask = (1 << data);
+					i2c_bus.a_clk_mask = (1 << clk);
+					i2c_bus.a_data_mask = (1 << data);
+					i2c_bus.en_clk_mask = (1 << clk);
+					i2c_bus.en_data_mask = (1 << data);
+					i2c_bus.y_clk_mask = (1 << clk);
+					i2c_bus.y_data_mask = (1 << data);
+					i2c_bus.mask_clk_reg = RADEON_GPIOPAD_MASK;
+					i2c_bus.mask_data_reg = RADEON_GPIOPAD_MASK;
+					i2c_bus.a_clk_reg = RADEON_GPIOPAD_A;
+					i2c_bus.a_data_reg = RADEON_GPIOPAD_A;
+					i2c_bus.en_clk_reg = RADEON_GPIOPAD_EN;
+					i2c_bus.en_data_reg = RADEON_GPIOPAD_EN;
+					i2c_bus.y_clk_reg = RADEON_GPIOPAD_Y;
+					i2c_bus.y_data_reg = RADEON_GPIOPAD_Y;
+					tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
+					tmds->dvo_chip = DVO_SIL164;
+					tmds->slave_addr = 0x70 >> 1; /* 7 bit addressing */
+					break;
+				}
+			}
+		}
+	} else {
+		offset = combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE);
+		if (offset) {
+			ver = RBIOS8(offset);
+			DRM_INFO("External TMDS Table revision: %d\n", ver);
+			tmds->slave_addr = RBIOS8(offset + 4 + 2);
+			tmds->slave_addr >>= 1; /* 7 bit addressing */
+			gpio = RBIOS8(offset + 4 + 3);
+			switch (gpio) {
+			case DDC_MONID:
+				i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID);
+				tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
+				break;
+			case DDC_DVI:
+				i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
+				tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
+				break;
+			case DDC_VGA:
+				i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
+				tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
+				break;
+			case DDC_CRT2:
+				/* R3xx+ chips don't have GPIO_CRT2_DDC gpio pad */
+				if (rdev->family >= CHIP_R300)
+					i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID);
1204 else
1205 i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC);
1206 tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
1207 break;
1208 case DDC_LCD: /* MM i2c */
1209 DRM_ERROR("MM i2c requires hw i2c engine\n");
1210 break;
1211 default:
1212 DRM_ERROR("Unsupported gpio %d\n", gpio);
1213 break;
1214 }
1215 }
1104 } 1216 }
1217
1218 if (!tmds->i2c_bus) {
1219 DRM_INFO("No valid Ext TMDS info found in BIOS\n");
1220 return false;
1221 }
1222
1223 return true;
1105} 1224}
1106 1225
1107bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) 1226bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1108{ 1227{
1109 struct radeon_device *rdev = dev->dev_private; 1228 struct radeon_device *rdev = dev->dev_private;
1110 struct radeon_i2c_bus_rec ddc_i2c; 1229 struct radeon_i2c_bus_rec ddc_i2c;
1230 struct radeon_hpd hpd;
1111 1231
1112 rdev->mode_info.connector_table = radeon_connector_table; 1232 rdev->mode_info.connector_table = radeon_connector_table;
1113 if (rdev->mode_info.connector_table == CT_NONE) { 1233 if (rdev->mode_info.connector_table == CT_NONE) {
@@ -1168,7 +1288,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1168 /* these are the most common settings */ 1288 /* these are the most common settings */
1169 if (rdev->flags & RADEON_SINGLE_CRTC) { 1289 if (rdev->flags & RADEON_SINGLE_CRTC) {
1170 /* VGA - primary dac */ 1290 /* VGA - primary dac */
1171 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC); 1291 ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
1292 hpd.hpd = RADEON_HPD_NONE;
1172 radeon_add_legacy_encoder(dev, 1293 radeon_add_legacy_encoder(dev,
1173 radeon_get_encoder_id(dev, 1294 radeon_get_encoder_id(dev,
1174 ATOM_DEVICE_CRT1_SUPPORT, 1295 ATOM_DEVICE_CRT1_SUPPORT,
@@ -1178,10 +1299,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1178 ATOM_DEVICE_CRT1_SUPPORT, 1299 ATOM_DEVICE_CRT1_SUPPORT,
1179 DRM_MODE_CONNECTOR_VGA, 1300 DRM_MODE_CONNECTOR_VGA,
1180 &ddc_i2c, 1301 &ddc_i2c,
1181 CONNECTOR_OBJECT_ID_VGA); 1302 CONNECTOR_OBJECT_ID_VGA,
1303 &hpd);
1182 } else if (rdev->flags & RADEON_IS_MOBILITY) { 1304 } else if (rdev->flags & RADEON_IS_MOBILITY) {
1183 /* LVDS */ 1305 /* LVDS */
1184 ddc_i2c = combios_setup_i2c_bus(RADEON_LCD_GPIO_MASK); 1306 ddc_i2c = combios_setup_i2c_bus(rdev, 0);
1307 hpd.hpd = RADEON_HPD_NONE;
1185 radeon_add_legacy_encoder(dev, 1308 radeon_add_legacy_encoder(dev,
1186 radeon_get_encoder_id(dev, 1309 radeon_get_encoder_id(dev,
1187 ATOM_DEVICE_LCD1_SUPPORT, 1310 ATOM_DEVICE_LCD1_SUPPORT,
@@ -1191,10 +1314,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1191 ATOM_DEVICE_LCD1_SUPPORT, 1314 ATOM_DEVICE_LCD1_SUPPORT,
1192 DRM_MODE_CONNECTOR_LVDS, 1315 DRM_MODE_CONNECTOR_LVDS,
1193 &ddc_i2c, 1316 &ddc_i2c,
1194 CONNECTOR_OBJECT_ID_LVDS); 1317 CONNECTOR_OBJECT_ID_LVDS,
1318 &hpd);
1195 1319
1196 /* VGA - primary dac */ 1320 /* VGA - primary dac */
1197 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC); 1321 ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
1322 hpd.hpd = RADEON_HPD_NONE;
1198 radeon_add_legacy_encoder(dev, 1323 radeon_add_legacy_encoder(dev,
1199 radeon_get_encoder_id(dev, 1324 radeon_get_encoder_id(dev,
1200 ATOM_DEVICE_CRT1_SUPPORT, 1325 ATOM_DEVICE_CRT1_SUPPORT,
@@ -1204,10 +1329,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1204 ATOM_DEVICE_CRT1_SUPPORT, 1329 ATOM_DEVICE_CRT1_SUPPORT,
1205 DRM_MODE_CONNECTOR_VGA, 1330 DRM_MODE_CONNECTOR_VGA,
1206 &ddc_i2c, 1331 &ddc_i2c,
1207 CONNECTOR_OBJECT_ID_VGA); 1332 CONNECTOR_OBJECT_ID_VGA,
1333 &hpd);
1208 } else { 1334 } else {
1209 /* DVI-I - tv dac, int tmds */ 1335 /* DVI-I - tv dac, int tmds */
1210 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC); 1336 ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
1337 hpd.hpd = RADEON_HPD_1;
1211 radeon_add_legacy_encoder(dev, 1338 radeon_add_legacy_encoder(dev,
1212 radeon_get_encoder_id(dev, 1339 radeon_get_encoder_id(dev,
1213 ATOM_DEVICE_DFP1_SUPPORT, 1340 ATOM_DEVICE_DFP1_SUPPORT,
@@ -1223,10 +1350,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1223 ATOM_DEVICE_CRT2_SUPPORT, 1350 ATOM_DEVICE_CRT2_SUPPORT,
1224 DRM_MODE_CONNECTOR_DVII, 1351 DRM_MODE_CONNECTOR_DVII,
1225 &ddc_i2c, 1352 &ddc_i2c,
1226 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I); 1353 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
1354 &hpd);
1227 1355
1228 /* VGA - primary dac */ 1356 /* VGA - primary dac */
1229 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC); 1357 ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
1358 hpd.hpd = RADEON_HPD_NONE;
1230 radeon_add_legacy_encoder(dev, 1359 radeon_add_legacy_encoder(dev,
1231 radeon_get_encoder_id(dev, 1360 radeon_get_encoder_id(dev,
1232 ATOM_DEVICE_CRT1_SUPPORT, 1361 ATOM_DEVICE_CRT1_SUPPORT,
@@ -1236,11 +1365,14 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1236 ATOM_DEVICE_CRT1_SUPPORT, 1365 ATOM_DEVICE_CRT1_SUPPORT,
1237 DRM_MODE_CONNECTOR_VGA, 1366 DRM_MODE_CONNECTOR_VGA,
1238 &ddc_i2c, 1367 &ddc_i2c,
1239 CONNECTOR_OBJECT_ID_VGA); 1368 CONNECTOR_OBJECT_ID_VGA,
1369 &hpd);
1240 } 1370 }
1241 1371
1242 if (rdev->family != CHIP_R100 && rdev->family != CHIP_R200) { 1372 if (rdev->family != CHIP_R100 && rdev->family != CHIP_R200) {
1243 /* TV - tv dac */ 1373 /* TV - tv dac */
1374 ddc_i2c.valid = false;
1375 hpd.hpd = RADEON_HPD_NONE;
1244 radeon_add_legacy_encoder(dev, 1376 radeon_add_legacy_encoder(dev,
1245 radeon_get_encoder_id(dev, 1377 radeon_get_encoder_id(dev,
1246 ATOM_DEVICE_TV1_SUPPORT, 1378 ATOM_DEVICE_TV1_SUPPORT,
@@ -1250,14 +1382,16 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1250 ATOM_DEVICE_TV1_SUPPORT, 1382 ATOM_DEVICE_TV1_SUPPORT,
1251 DRM_MODE_CONNECTOR_SVIDEO, 1383 DRM_MODE_CONNECTOR_SVIDEO,
1252 &ddc_i2c, 1384 &ddc_i2c,
1253 CONNECTOR_OBJECT_ID_SVIDEO); 1385 CONNECTOR_OBJECT_ID_SVIDEO,
1386 &hpd);
1254 } 1387 }
1255 break; 1388 break;
1256 case CT_IBOOK: 1389 case CT_IBOOK:
1257 DRM_INFO("Connector Table: %d (ibook)\n", 1390 DRM_INFO("Connector Table: %d (ibook)\n",
1258 rdev->mode_info.connector_table); 1391 rdev->mode_info.connector_table);
1259 /* LVDS */ 1392 /* LVDS */
1260 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC); 1393 ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
1394 hpd.hpd = RADEON_HPD_NONE;
1261 radeon_add_legacy_encoder(dev, 1395 radeon_add_legacy_encoder(dev,
1262 radeon_get_encoder_id(dev, 1396 radeon_get_encoder_id(dev,
1263 ATOM_DEVICE_LCD1_SUPPORT, 1397 ATOM_DEVICE_LCD1_SUPPORT,
@@ -1265,9 +1399,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1265 ATOM_DEVICE_LCD1_SUPPORT); 1399 ATOM_DEVICE_LCD1_SUPPORT);
1266 radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT, 1400 radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
1267 DRM_MODE_CONNECTOR_LVDS, &ddc_i2c, 1401 DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
1268 CONNECTOR_OBJECT_ID_LVDS); 1402 CONNECTOR_OBJECT_ID_LVDS,
1403 &hpd);
1269 /* VGA - TV DAC */ 1404 /* VGA - TV DAC */
1270 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC); 1405 ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
1406 hpd.hpd = RADEON_HPD_NONE;
1271 radeon_add_legacy_encoder(dev, 1407 radeon_add_legacy_encoder(dev,
1272 radeon_get_encoder_id(dev, 1408 radeon_get_encoder_id(dev,
1273 ATOM_DEVICE_CRT2_SUPPORT, 1409 ATOM_DEVICE_CRT2_SUPPORT,
@@ -1275,8 +1411,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1275 ATOM_DEVICE_CRT2_SUPPORT); 1411 ATOM_DEVICE_CRT2_SUPPORT);
1276 radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT, 1412 radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT,
1277 DRM_MODE_CONNECTOR_VGA, &ddc_i2c, 1413 DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
1278 CONNECTOR_OBJECT_ID_VGA); 1414 CONNECTOR_OBJECT_ID_VGA,
1415 &hpd);
1279 /* TV - TV DAC */ 1416 /* TV - TV DAC */
1417 ddc_i2c.valid = false;
1418 hpd.hpd = RADEON_HPD_NONE;
1280 radeon_add_legacy_encoder(dev, 1419 radeon_add_legacy_encoder(dev,
1281 radeon_get_encoder_id(dev, 1420 radeon_get_encoder_id(dev,
1282 ATOM_DEVICE_TV1_SUPPORT, 1421 ATOM_DEVICE_TV1_SUPPORT,
@@ -1285,13 +1424,15 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1285 radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT, 1424 radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
1286 DRM_MODE_CONNECTOR_SVIDEO, 1425 DRM_MODE_CONNECTOR_SVIDEO,
1287 &ddc_i2c, 1426 &ddc_i2c,
1288 CONNECTOR_OBJECT_ID_SVIDEO); 1427 CONNECTOR_OBJECT_ID_SVIDEO,
1428 &hpd);
1289 break; 1429 break;
1290 case CT_POWERBOOK_EXTERNAL: 1430 case CT_POWERBOOK_EXTERNAL:
1291 DRM_INFO("Connector Table: %d (powerbook external tmds)\n", 1431 DRM_INFO("Connector Table: %d (powerbook external tmds)\n",
1292 rdev->mode_info.connector_table); 1432 rdev->mode_info.connector_table);
1293 /* LVDS */ 1433 /* LVDS */
1294 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC); 1434 ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
1435 hpd.hpd = RADEON_HPD_NONE;
1295 radeon_add_legacy_encoder(dev, 1436 radeon_add_legacy_encoder(dev,
1296 radeon_get_encoder_id(dev, 1437 radeon_get_encoder_id(dev,
1297 ATOM_DEVICE_LCD1_SUPPORT, 1438 ATOM_DEVICE_LCD1_SUPPORT,
@@ -1299,9 +1440,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1299 ATOM_DEVICE_LCD1_SUPPORT); 1440 ATOM_DEVICE_LCD1_SUPPORT);
1300 radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT, 1441 radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
1301 DRM_MODE_CONNECTOR_LVDS, &ddc_i2c, 1442 DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
1302 CONNECTOR_OBJECT_ID_LVDS); 1443 CONNECTOR_OBJECT_ID_LVDS,
1444 &hpd);
1303 /* DVI-I - primary dac, ext tmds */ 1445 /* DVI-I - primary dac, ext tmds */
1304 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC); 1446 ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
1447 hpd.hpd = RADEON_HPD_2; /* ??? */
1305 radeon_add_legacy_encoder(dev, 1448 radeon_add_legacy_encoder(dev,
1306 radeon_get_encoder_id(dev, 1449 radeon_get_encoder_id(dev,
1307 ATOM_DEVICE_DFP2_SUPPORT, 1450 ATOM_DEVICE_DFP2_SUPPORT,
@@ -1317,8 +1460,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1317 ATOM_DEVICE_DFP2_SUPPORT | 1460 ATOM_DEVICE_DFP2_SUPPORT |
1318 ATOM_DEVICE_CRT1_SUPPORT, 1461 ATOM_DEVICE_CRT1_SUPPORT,
1319 DRM_MODE_CONNECTOR_DVII, &ddc_i2c, 1462 DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
1320 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I); 1463 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I,
1464 &hpd);
1321 /* TV - TV DAC */ 1465 /* TV - TV DAC */
1466 ddc_i2c.valid = false;
1467 hpd.hpd = RADEON_HPD_NONE;
1322 radeon_add_legacy_encoder(dev, 1468 radeon_add_legacy_encoder(dev,
1323 radeon_get_encoder_id(dev, 1469 radeon_get_encoder_id(dev,
1324 ATOM_DEVICE_TV1_SUPPORT, 1470 ATOM_DEVICE_TV1_SUPPORT,
@@ -1327,13 +1473,15 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1327 radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT, 1473 radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
1328 DRM_MODE_CONNECTOR_SVIDEO, 1474 DRM_MODE_CONNECTOR_SVIDEO,
1329 &ddc_i2c, 1475 &ddc_i2c,
1330 CONNECTOR_OBJECT_ID_SVIDEO); 1476 CONNECTOR_OBJECT_ID_SVIDEO,
1477 &hpd);
1331 break; 1478 break;
1332 case CT_POWERBOOK_INTERNAL: 1479 case CT_POWERBOOK_INTERNAL:
1333 DRM_INFO("Connector Table: %d (powerbook internal tmds)\n", 1480 DRM_INFO("Connector Table: %d (powerbook internal tmds)\n",
1334 rdev->mode_info.connector_table); 1481 rdev->mode_info.connector_table);
1335 /* LVDS */ 1482 /* LVDS */
1336 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC); 1483 ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
1484 hpd.hpd = RADEON_HPD_NONE;
1337 radeon_add_legacy_encoder(dev, 1485 radeon_add_legacy_encoder(dev,
1338 radeon_get_encoder_id(dev, 1486 radeon_get_encoder_id(dev,
1339 ATOM_DEVICE_LCD1_SUPPORT, 1487 ATOM_DEVICE_LCD1_SUPPORT,
@@ -1341,9 +1489,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1341 ATOM_DEVICE_LCD1_SUPPORT); 1489 ATOM_DEVICE_LCD1_SUPPORT);
1342 radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT, 1490 radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
1343 DRM_MODE_CONNECTOR_LVDS, &ddc_i2c, 1491 DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
1344 CONNECTOR_OBJECT_ID_LVDS); 1492 CONNECTOR_OBJECT_ID_LVDS,
1493 &hpd);
1345 /* DVI-I - primary dac, int tmds */ 1494 /* DVI-I - primary dac, int tmds */
1346 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC); 1495 ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
1496 hpd.hpd = RADEON_HPD_1; /* ??? */
1347 radeon_add_legacy_encoder(dev, 1497 radeon_add_legacy_encoder(dev,
1348 radeon_get_encoder_id(dev, 1498 radeon_get_encoder_id(dev,
1349 ATOM_DEVICE_DFP1_SUPPORT, 1499 ATOM_DEVICE_DFP1_SUPPORT,
@@ -1358,8 +1508,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1358 ATOM_DEVICE_DFP1_SUPPORT | 1508 ATOM_DEVICE_DFP1_SUPPORT |
1359 ATOM_DEVICE_CRT1_SUPPORT, 1509 ATOM_DEVICE_CRT1_SUPPORT,
1360 DRM_MODE_CONNECTOR_DVII, &ddc_i2c, 1510 DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
1361 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I); 1511 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
1512 &hpd);
1362 /* TV - TV DAC */ 1513 /* TV - TV DAC */
1514 ddc_i2c.valid = false;
1515 hpd.hpd = RADEON_HPD_NONE;
1363 radeon_add_legacy_encoder(dev, 1516 radeon_add_legacy_encoder(dev,
1364 radeon_get_encoder_id(dev, 1517 radeon_get_encoder_id(dev,
1365 ATOM_DEVICE_TV1_SUPPORT, 1518 ATOM_DEVICE_TV1_SUPPORT,
@@ -1368,13 +1521,15 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1368 radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT, 1521 radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
1369 DRM_MODE_CONNECTOR_SVIDEO, 1522 DRM_MODE_CONNECTOR_SVIDEO,
1370 &ddc_i2c, 1523 &ddc_i2c,
1371 CONNECTOR_OBJECT_ID_SVIDEO); 1524 CONNECTOR_OBJECT_ID_SVIDEO,
1525 &hpd);
1372 break; 1526 break;
1373 case CT_POWERBOOK_VGA: 1527 case CT_POWERBOOK_VGA:
1374 DRM_INFO("Connector Table: %d (powerbook vga)\n", 1528 DRM_INFO("Connector Table: %d (powerbook vga)\n",
1375 rdev->mode_info.connector_table); 1529 rdev->mode_info.connector_table);
1376 /* LVDS */ 1530 /* LVDS */
1377 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC); 1531 ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
1532 hpd.hpd = RADEON_HPD_NONE;
1378 radeon_add_legacy_encoder(dev, 1533 radeon_add_legacy_encoder(dev,
1379 radeon_get_encoder_id(dev, 1534 radeon_get_encoder_id(dev,
1380 ATOM_DEVICE_LCD1_SUPPORT, 1535 ATOM_DEVICE_LCD1_SUPPORT,
@@ -1382,9 +1537,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1382 ATOM_DEVICE_LCD1_SUPPORT); 1537 ATOM_DEVICE_LCD1_SUPPORT);
1383 radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT, 1538 radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
1384 DRM_MODE_CONNECTOR_LVDS, &ddc_i2c, 1539 DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
1385 CONNECTOR_OBJECT_ID_LVDS); 1540 CONNECTOR_OBJECT_ID_LVDS,
1541 &hpd);
1386 /* VGA - primary dac */ 1542 /* VGA - primary dac */
1387 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC); 1543 ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
1544 hpd.hpd = RADEON_HPD_NONE;
1388 radeon_add_legacy_encoder(dev, 1545 radeon_add_legacy_encoder(dev,
1389 radeon_get_encoder_id(dev, 1546 radeon_get_encoder_id(dev,
1390 ATOM_DEVICE_CRT1_SUPPORT, 1547 ATOM_DEVICE_CRT1_SUPPORT,
@@ -1392,8 +1549,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1392 ATOM_DEVICE_CRT1_SUPPORT); 1549 ATOM_DEVICE_CRT1_SUPPORT);
1393 radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT1_SUPPORT, 1550 radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT1_SUPPORT,
1394 DRM_MODE_CONNECTOR_VGA, &ddc_i2c, 1551 DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
1395 CONNECTOR_OBJECT_ID_VGA); 1552 CONNECTOR_OBJECT_ID_VGA,
1553 &hpd);
1396 /* TV - TV DAC */ 1554 /* TV - TV DAC */
1555 ddc_i2c.valid = false;
1556 hpd.hpd = RADEON_HPD_NONE;
1397 radeon_add_legacy_encoder(dev, 1557 radeon_add_legacy_encoder(dev,
1398 radeon_get_encoder_id(dev, 1558 radeon_get_encoder_id(dev,
1399 ATOM_DEVICE_TV1_SUPPORT, 1559 ATOM_DEVICE_TV1_SUPPORT,
@@ -1402,13 +1562,15 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1402 radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT, 1562 radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
1403 DRM_MODE_CONNECTOR_SVIDEO, 1563 DRM_MODE_CONNECTOR_SVIDEO,
1404 &ddc_i2c, 1564 &ddc_i2c,
1405 CONNECTOR_OBJECT_ID_SVIDEO); 1565 CONNECTOR_OBJECT_ID_SVIDEO,
1566 &hpd);
1406 break; 1567 break;
1407 case CT_MINI_EXTERNAL: 1568 case CT_MINI_EXTERNAL:
1408 DRM_INFO("Connector Table: %d (mini external tmds)\n", 1569 DRM_INFO("Connector Table: %d (mini external tmds)\n",
1409 rdev->mode_info.connector_table); 1570 rdev->mode_info.connector_table);
1410 /* DVI-I - tv dac, ext tmds */ 1571 /* DVI-I - tv dac, ext tmds */
1411 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_CRT2_DDC); 1572 ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC);
1573 hpd.hpd = RADEON_HPD_2; /* ??? */
1412 radeon_add_legacy_encoder(dev, 1574 radeon_add_legacy_encoder(dev,
1413 radeon_get_encoder_id(dev, 1575 radeon_get_encoder_id(dev,
1414 ATOM_DEVICE_DFP2_SUPPORT, 1576 ATOM_DEVICE_DFP2_SUPPORT,
@@ -1424,8 +1586,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1424 ATOM_DEVICE_DFP2_SUPPORT | 1586 ATOM_DEVICE_DFP2_SUPPORT |
1425 ATOM_DEVICE_CRT2_SUPPORT, 1587 ATOM_DEVICE_CRT2_SUPPORT,
1426 DRM_MODE_CONNECTOR_DVII, &ddc_i2c, 1588 DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
1427 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I); 1589 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
1590 &hpd);
1428 /* TV - TV DAC */ 1591 /* TV - TV DAC */
1592 ddc_i2c.valid = false;
1593 hpd.hpd = RADEON_HPD_NONE;
1429 radeon_add_legacy_encoder(dev, 1594 radeon_add_legacy_encoder(dev,
1430 radeon_get_encoder_id(dev, 1595 radeon_get_encoder_id(dev,
1431 ATOM_DEVICE_TV1_SUPPORT, 1596 ATOM_DEVICE_TV1_SUPPORT,
@@ -1434,13 +1599,15 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1434 radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_TV1_SUPPORT, 1599 radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_TV1_SUPPORT,
1435 DRM_MODE_CONNECTOR_SVIDEO, 1600 DRM_MODE_CONNECTOR_SVIDEO,
1436 &ddc_i2c, 1601 &ddc_i2c,
1437 CONNECTOR_OBJECT_ID_SVIDEO); 1602 CONNECTOR_OBJECT_ID_SVIDEO,
1603 &hpd);
1438 break; 1604 break;
1439 case CT_MINI_INTERNAL: 1605 case CT_MINI_INTERNAL:
1440 DRM_INFO("Connector Table: %d (mini internal tmds)\n", 1606 DRM_INFO("Connector Table: %d (mini internal tmds)\n",
1441 rdev->mode_info.connector_table); 1607 rdev->mode_info.connector_table);
1442 /* DVI-I - tv dac, int tmds */ 1608 /* DVI-I - tv dac, int tmds */
1443 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_CRT2_DDC); 1609 ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC);
1610 hpd.hpd = RADEON_HPD_1; /* ??? */
1444 radeon_add_legacy_encoder(dev, 1611 radeon_add_legacy_encoder(dev,
1445 radeon_get_encoder_id(dev, 1612 radeon_get_encoder_id(dev,
1446 ATOM_DEVICE_DFP1_SUPPORT, 1613 ATOM_DEVICE_DFP1_SUPPORT,
@@ -1455,8 +1622,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1455 ATOM_DEVICE_DFP1_SUPPORT | 1622 ATOM_DEVICE_DFP1_SUPPORT |
1456 ATOM_DEVICE_CRT2_SUPPORT, 1623 ATOM_DEVICE_CRT2_SUPPORT,
1457 DRM_MODE_CONNECTOR_DVII, &ddc_i2c, 1624 DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
1458 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I); 1625 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
1626 &hpd);
1459 /* TV - TV DAC */ 1627 /* TV - TV DAC */
1628 ddc_i2c.valid = false;
1629 hpd.hpd = RADEON_HPD_NONE;
1460 radeon_add_legacy_encoder(dev, 1630 radeon_add_legacy_encoder(dev,
1461 radeon_get_encoder_id(dev, 1631 radeon_get_encoder_id(dev,
1462 ATOM_DEVICE_TV1_SUPPORT, 1632 ATOM_DEVICE_TV1_SUPPORT,
@@ -1465,13 +1635,15 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1465 radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_TV1_SUPPORT, 1635 radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_TV1_SUPPORT,
1466 DRM_MODE_CONNECTOR_SVIDEO, 1636 DRM_MODE_CONNECTOR_SVIDEO,
1467 &ddc_i2c, 1637 &ddc_i2c,
1468 CONNECTOR_OBJECT_ID_SVIDEO); 1638 CONNECTOR_OBJECT_ID_SVIDEO,
1639 &hpd);
1469 break; 1640 break;
1470 case CT_IMAC_G5_ISIGHT: 1641 case CT_IMAC_G5_ISIGHT:
1471 DRM_INFO("Connector Table: %d (imac g5 isight)\n", 1642 DRM_INFO("Connector Table: %d (imac g5 isight)\n",
1472 rdev->mode_info.connector_table); 1643 rdev->mode_info.connector_table);
1473 /* DVI-D - int tmds */ 1644 /* DVI-D - int tmds */
1474 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_MONID); 1645 ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID);
1646 hpd.hpd = RADEON_HPD_1; /* ??? */
1475 radeon_add_legacy_encoder(dev, 1647 radeon_add_legacy_encoder(dev,
1476 radeon_get_encoder_id(dev, 1648 radeon_get_encoder_id(dev,
1477 ATOM_DEVICE_DFP1_SUPPORT, 1649 ATOM_DEVICE_DFP1_SUPPORT,
@@ -1479,9 +1651,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1479 ATOM_DEVICE_DFP1_SUPPORT); 1651 ATOM_DEVICE_DFP1_SUPPORT);
1480 radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_DFP1_SUPPORT, 1652 radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_DFP1_SUPPORT,
1481 DRM_MODE_CONNECTOR_DVID, &ddc_i2c, 1653 DRM_MODE_CONNECTOR_DVID, &ddc_i2c,
1482 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D); 1654 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D,
1655 &hpd);
1483 /* VGA - tv dac */ 1656 /* VGA - tv dac */
1484 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC); 1657 ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
1658 hpd.hpd = RADEON_HPD_NONE;
1485 radeon_add_legacy_encoder(dev, 1659 radeon_add_legacy_encoder(dev,
1486 radeon_get_encoder_id(dev, 1660 radeon_get_encoder_id(dev,
1487 ATOM_DEVICE_CRT2_SUPPORT, 1661 ATOM_DEVICE_CRT2_SUPPORT,
@@ -1489,8 +1663,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1489 ATOM_DEVICE_CRT2_SUPPORT); 1663 ATOM_DEVICE_CRT2_SUPPORT);
1490 radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT, 1664 radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT,
1491 DRM_MODE_CONNECTOR_VGA, &ddc_i2c, 1665 DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
1492 CONNECTOR_OBJECT_ID_VGA); 1666 CONNECTOR_OBJECT_ID_VGA,
1667 &hpd);
1493 /* TV - TV DAC */ 1668 /* TV - TV DAC */
1669 ddc_i2c.valid = false;
1670 hpd.hpd = RADEON_HPD_NONE;
1494 radeon_add_legacy_encoder(dev, 1671 radeon_add_legacy_encoder(dev,
1495 radeon_get_encoder_id(dev, 1672 radeon_get_encoder_id(dev,
1496 ATOM_DEVICE_TV1_SUPPORT, 1673 ATOM_DEVICE_TV1_SUPPORT,
@@ -1499,13 +1676,15 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1499 radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT, 1676 radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
1500 DRM_MODE_CONNECTOR_SVIDEO, 1677 DRM_MODE_CONNECTOR_SVIDEO,
1501 &ddc_i2c, 1678 &ddc_i2c,
1502 CONNECTOR_OBJECT_ID_SVIDEO); 1679 CONNECTOR_OBJECT_ID_SVIDEO,
1680 &hpd);
1503 break; 1681 break;
1504 case CT_EMAC: 1682 case CT_EMAC:
1505 DRM_INFO("Connector Table: %d (emac)\n", 1683 DRM_INFO("Connector Table: %d (emac)\n",
1506 rdev->mode_info.connector_table); 1684 rdev->mode_info.connector_table);
1507 /* VGA - primary dac */ 1685 /* VGA - primary dac */
1508 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC); 1686 ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
1687 hpd.hpd = RADEON_HPD_NONE;
1509 radeon_add_legacy_encoder(dev, 1688 radeon_add_legacy_encoder(dev,
1510 radeon_get_encoder_id(dev, 1689 radeon_get_encoder_id(dev,
1511 ATOM_DEVICE_CRT1_SUPPORT, 1690 ATOM_DEVICE_CRT1_SUPPORT,
@@ -1513,9 +1692,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1513 ATOM_DEVICE_CRT1_SUPPORT); 1692 ATOM_DEVICE_CRT1_SUPPORT);
1514 radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_CRT1_SUPPORT, 1693 radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_CRT1_SUPPORT,
1515 DRM_MODE_CONNECTOR_VGA, &ddc_i2c, 1694 DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
1516 CONNECTOR_OBJECT_ID_VGA); 1695 CONNECTOR_OBJECT_ID_VGA,
1696 &hpd);
1517 /* VGA - tv dac */ 1697 /* VGA - tv dac */
1518 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_CRT2_DDC); 1698 ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC);
1699 hpd.hpd = RADEON_HPD_NONE;
1519 radeon_add_legacy_encoder(dev, 1700 radeon_add_legacy_encoder(dev,
1520 radeon_get_encoder_id(dev, 1701 radeon_get_encoder_id(dev,
1521 ATOM_DEVICE_CRT2_SUPPORT, 1702 ATOM_DEVICE_CRT2_SUPPORT,
@@ -1523,8 +1704,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1523 ATOM_DEVICE_CRT2_SUPPORT); 1704 ATOM_DEVICE_CRT2_SUPPORT);
1524 radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT, 1705 radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT,
1525 DRM_MODE_CONNECTOR_VGA, &ddc_i2c, 1706 DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
1526 CONNECTOR_OBJECT_ID_VGA); 1707 CONNECTOR_OBJECT_ID_VGA,
1708 &hpd);
1527 /* TV - TV DAC */ 1709 /* TV - TV DAC */
1710 ddc_i2c.valid = false;
1711 hpd.hpd = RADEON_HPD_NONE;
1528 radeon_add_legacy_encoder(dev, 1712 radeon_add_legacy_encoder(dev,
1529 radeon_get_encoder_id(dev, 1713 radeon_get_encoder_id(dev,
1530 ATOM_DEVICE_TV1_SUPPORT, 1714 ATOM_DEVICE_TV1_SUPPORT,
@@ -1533,7 +1717,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1533 radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT, 1717 radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
1534 DRM_MODE_CONNECTOR_SVIDEO, 1718 DRM_MODE_CONNECTOR_SVIDEO,
1535 &ddc_i2c, 1719 &ddc_i2c,
1536 CONNECTOR_OBJECT_ID_SVIDEO); 1720 CONNECTOR_OBJECT_ID_SVIDEO,
1721 &hpd);
1537 break; 1722 break;
1538 default: 1723 default:
1539 DRM_INFO("Connector table: %d (invalid)\n", 1724 DRM_INFO("Connector table: %d (invalid)\n",
@@ -1550,7 +1735,8 @@ static bool radeon_apply_legacy_quirks(struct drm_device *dev,
1550 int bios_index, 1735 int bios_index,
1551 enum radeon_combios_connector 1736 enum radeon_combios_connector
1552 *legacy_connector, 1737 *legacy_connector,
1553 struct radeon_i2c_bus_rec *ddc_i2c) 1738 struct radeon_i2c_bus_rec *ddc_i2c,
1739 struct radeon_hpd *hpd)
1554{ 1740{
1555 struct radeon_device *rdev = dev->dev_private; 1741 struct radeon_device *rdev = dev->dev_private;
1556 1742
@@ -1558,29 +1744,26 @@ static bool radeon_apply_legacy_quirks(struct drm_device *dev,
1558 if ((rdev->family == CHIP_RS400 || 1744 if ((rdev->family == CHIP_RS400 ||
1559 rdev->family == CHIP_RS480) && 1745 rdev->family == CHIP_RS480) &&
1560 ddc_i2c->mask_clk_reg == RADEON_GPIO_CRT2_DDC) 1746 ddc_i2c->mask_clk_reg == RADEON_GPIO_CRT2_DDC)
1561 *ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_MONID); 1747 *ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID);
1562 else if ((rdev->family == CHIP_RS400 || 1748 else if ((rdev->family == CHIP_RS400 ||
1563 rdev->family == CHIP_RS480) && 1749 rdev->family == CHIP_RS480) &&
1564 ddc_i2c->mask_clk_reg == RADEON_GPIO_MONID) { 1750 ddc_i2c->mask_clk_reg == RADEON_GPIO_MONID) {
1565 ddc_i2c->valid = true; 1751 *ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIOPAD_MASK);
1566 ddc_i2c->mask_clk_mask = (0x20 << 8); 1752 ddc_i2c->mask_clk_mask = (0x20 << 8);
1567 ddc_i2c->mask_data_mask = 0x80; 1753 ddc_i2c->mask_data_mask = 0x80;
1568 ddc_i2c->a_clk_mask = (0x20 << 8); 1754 ddc_i2c->a_clk_mask = (0x20 << 8);
1569 ddc_i2c->a_data_mask = 0x80; 1755 ddc_i2c->a_data_mask = 0x80;
1570 ddc_i2c->put_clk_mask = (0x20 << 8); 1756 ddc_i2c->en_clk_mask = (0x20 << 8);
1571 ddc_i2c->put_data_mask = 0x80; 1757 ddc_i2c->en_data_mask = 0x80;
1572 ddc_i2c->get_clk_mask = (0x20 << 8); 1758 ddc_i2c->y_clk_mask = (0x20 << 8);
1573 ddc_i2c->get_data_mask = 0x80; 1759 ddc_i2c->y_data_mask = 0x80;
1574 ddc_i2c->mask_clk_reg = RADEON_GPIOPAD_MASK;
1575 ddc_i2c->mask_data_reg = RADEON_GPIOPAD_MASK;
1576 ddc_i2c->a_clk_reg = RADEON_GPIOPAD_A;
1577 ddc_i2c->a_data_reg = RADEON_GPIOPAD_A;
1578 ddc_i2c->put_clk_reg = RADEON_GPIOPAD_EN;
1579 ddc_i2c->put_data_reg = RADEON_GPIOPAD_EN;
1580 ddc_i2c->get_clk_reg = RADEON_LCD_GPIO_Y_REG;
1581 ddc_i2c->get_data_reg = RADEON_LCD_GPIO_Y_REG;
1582 } 1760 }
1583 1761
1762 /* R3xx+ chips don't have GPIO_CRT2_DDC gpio pad */
1763 if ((rdev->family >= CHIP_R300) &&
1764 ddc_i2c->mask_clk_reg == RADEON_GPIO_CRT2_DDC)
1765 *ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
1766
1584 /* Certain IBM chipset RN50s have a BIOS reporting two VGAs, 1767 /* Certain IBM chipset RN50s have a BIOS reporting two VGAs,
1585 one with VGA DDC and one with CRT2 DDC. - kill the CRT2 DDC one */ 1768 one with VGA DDC and one with CRT2 DDC. - kill the CRT2 DDC one */
1586 if (dev->pdev->device == 0x515e && 1769 if (dev->pdev->device == 0x515e &&
@@ -1624,6 +1807,12 @@ static bool radeon_apply_legacy_tv_quirks(struct drm_device *dev)
1624 dev->pdev->subsystem_device == 0x280a) 1807 dev->pdev->subsystem_device == 0x280a)
1625 return false; 1808 return false;
1626 1809
1810 /* MSI S270 has non-existent TV port */
1811 if (dev->pdev->device == 0x5955 &&
1812 dev->pdev->subsystem_vendor == 0x1462 &&
1813 dev->pdev->subsystem_device == 0x0131)
1814 return false;
1815
1627 return true; 1816 return true;
1628} 1817}
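radeon_apply_legacy_tv_quirks grows one hand-written id comparison per board, the MSI S270 entry being this diff's addition. A table-driven equivalent keeps the data and the test apart; struct and helper are illustrative, and only the S270 ids are taken from the diff:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct board_id { uint16_t device, sub_vendor, sub_device; };

static const struct board_id no_tv_port[] = {
	{ 0x5955, 0x1462, 0x0131 },	/* MSI S270: TV port not wired up */
	/* ... remaining quirked boards elided ... */
};

static bool board_has_tv_port(uint16_t dev, uint16_t sv, uint16_t sd)
{
	size_t i;

	for (i = 0; i < sizeof(no_tv_port) / sizeof(no_tv_port[0]); i++)
		if (no_tv_port[i].device == dev &&
		    no_tv_port[i].sub_vendor == sv &&
		    no_tv_port[i].sub_device == sd)
			return false;
	return true;
}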
1629 1818
@@ -1671,6 +1860,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
1671 enum radeon_combios_connector connector; 1860 enum radeon_combios_connector connector;
1672 int i = 0; 1861 int i = 0;
1673 struct radeon_i2c_bus_rec ddc_i2c; 1862 struct radeon_i2c_bus_rec ddc_i2c;
1863 struct radeon_hpd hpd;
1674 1864
1675 if (rdev->bios == NULL) 1865 if (rdev->bios == NULL)
1676 return false; 1866 return false;
@@ -1691,26 +1881,40 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
1691 switch (ddc_type) { 1881 switch (ddc_type) {
1692 case DDC_MONID: 1882 case DDC_MONID:
1693 ddc_i2c = 1883 ddc_i2c =
1694 combios_setup_i2c_bus(RADEON_GPIO_MONID); 1884 combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID);
1695 break; 1885 break;
1696 case DDC_DVI: 1886 case DDC_DVI:
1697 ddc_i2c = 1887 ddc_i2c =
1698 combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC); 1888 combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
1699 break; 1889 break;
1700 case DDC_VGA: 1890 case DDC_VGA:
1701 ddc_i2c = 1891 ddc_i2c =
1702 combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC); 1892 combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
1703 break; 1893 break;
1704 case DDC_CRT2: 1894 case DDC_CRT2:
1705 ddc_i2c = 1895 ddc_i2c =
1706 combios_setup_i2c_bus(RADEON_GPIO_CRT2_DDC); 1896 combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC);
1707 break; 1897 break;
1708 default: 1898 default:
1709 break; 1899 break;
1710 } 1900 }
1711 1901
1902 switch (connector) {
1903 case CONNECTOR_PROPRIETARY_LEGACY:
1904 case CONNECTOR_DVI_I_LEGACY:
1905 case CONNECTOR_DVI_D_LEGACY:
1906 if ((tmp >> 4) & 0x1)
1907 hpd.hpd = RADEON_HPD_2;
1908 else
1909 hpd.hpd = RADEON_HPD_1;
1910 break;
1911 default:
1912 hpd.hpd = RADEON_HPD_NONE;
1913 break;
1914 }
1915
1712 if (!radeon_apply_legacy_quirks(dev, i, &connector, 1916 if (!radeon_apply_legacy_quirks(dev, i, &connector,
1713 &ddc_i2c)) 1917 &ddc_i2c, &hpd))
1714 continue; 1918 continue;
1715 1919
1716 switch (connector) { 1920 switch (connector) {
@@ -1727,7 +1931,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
1727 legacy_connector_convert 1931 legacy_connector_convert
1728 [connector], 1932 [connector],
1729 &ddc_i2c, 1933 &ddc_i2c,
1730 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D); 1934 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D,
1935 &hpd);
1731 break; 1936 break;
1732 case CONNECTOR_CRT_LEGACY: 1937 case CONNECTOR_CRT_LEGACY:
1733 if (tmp & 0x1) { 1938 if (tmp & 0x1) {
@@ -1753,7 +1958,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
1753 legacy_connector_convert 1958 legacy_connector_convert
1754 [connector], 1959 [connector],
1755 &ddc_i2c, 1960 &ddc_i2c,
1756 CONNECTOR_OBJECT_ID_VGA); 1961 CONNECTOR_OBJECT_ID_VGA,
1962 &hpd);
1757 break; 1963 break;
1758 case CONNECTOR_DVI_I_LEGACY: 1964 case CONNECTOR_DVI_I_LEGACY:
1759 devices = 0; 1965 devices = 0;
@@ -1799,7 +2005,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
1799 legacy_connector_convert 2005 legacy_connector_convert
1800 [connector], 2006 [connector],
1801 &ddc_i2c, 2007 &ddc_i2c,
1802 connector_object_id); 2008 connector_object_id,
2009 &hpd);
1803 break; 2010 break;
1804 case CONNECTOR_DVI_D_LEGACY: 2011 case CONNECTOR_DVI_D_LEGACY:
1805 if ((tmp >> 4) & 0x1) { 2012 if ((tmp >> 4) & 0x1) {
@@ -1817,7 +2024,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
1817 legacy_connector_convert 2024 legacy_connector_convert
1818 [connector], 2025 [connector],
1819 &ddc_i2c, 2026 &ddc_i2c,
1820 connector_object_id); 2027 connector_object_id,
2028 &hpd);
1821 break; 2029 break;
1822 case CONNECTOR_CTV_LEGACY: 2030 case CONNECTOR_CTV_LEGACY:
1823 case CONNECTOR_STV_LEGACY: 2031 case CONNECTOR_STV_LEGACY:
@@ -1832,7 +2040,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
1832 legacy_connector_convert 2040 legacy_connector_convert
1833 [connector], 2041 [connector],
1834 &ddc_i2c, 2042 &ddc_i2c,
1835 CONNECTOR_OBJECT_ID_SVIDEO); 2043 CONNECTOR_OBJECT_ID_SVIDEO,
2044 &hpd);
1836 break; 2045 break;
1837 default: 2046 default:
1838 DRM_ERROR("Unknown connector type: %d\n", 2047 DRM_ERROR("Unknown connector type: %d\n",
@@ -1858,14 +2067,16 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
1858 0), 2067 0),
1859 ATOM_DEVICE_DFP1_SUPPORT); 2068 ATOM_DEVICE_DFP1_SUPPORT);
1860 2069
1861 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC); 2070 ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
2071 hpd.hpd = RADEON_HPD_NONE;
1862 radeon_add_legacy_connector(dev, 2072 radeon_add_legacy_connector(dev,
1863 0, 2073 0,
1864 ATOM_DEVICE_CRT1_SUPPORT | 2074 ATOM_DEVICE_CRT1_SUPPORT |
1865 ATOM_DEVICE_DFP1_SUPPORT, 2075 ATOM_DEVICE_DFP1_SUPPORT,
1866 DRM_MODE_CONNECTOR_DVII, 2076 DRM_MODE_CONNECTOR_DVII,
1867 &ddc_i2c, 2077 &ddc_i2c,
1868 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I); 2078 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
2079 &hpd);
1869 } else { 2080 } else {
1870 uint16_t crt_info = 2081 uint16_t crt_info =
1871 combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE); 2082 combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE);
@@ -1876,13 +2087,15 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
1876 ATOM_DEVICE_CRT1_SUPPORT, 2087 ATOM_DEVICE_CRT1_SUPPORT,
1877 1), 2088 1),
1878 ATOM_DEVICE_CRT1_SUPPORT); 2089 ATOM_DEVICE_CRT1_SUPPORT);
1879 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC); 2090 ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
2091 hpd.hpd = RADEON_HPD_NONE;
1880 radeon_add_legacy_connector(dev, 2092 radeon_add_legacy_connector(dev,
1881 0, 2093 0,
1882 ATOM_DEVICE_CRT1_SUPPORT, 2094 ATOM_DEVICE_CRT1_SUPPORT,
1883 DRM_MODE_CONNECTOR_VGA, 2095 DRM_MODE_CONNECTOR_VGA,
1884 &ddc_i2c, 2096 &ddc_i2c,
1885 CONNECTOR_OBJECT_ID_VGA); 2097 CONNECTOR_OBJECT_ID_VGA,
2098 &hpd);
1886 } else { 2099 } else {
1887 DRM_DEBUG("No connector info found\n"); 2100 DRM_DEBUG("No connector info found\n");
1888 return false; 2101 return false;
@@ -1910,27 +2123,27 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
1910 case DDC_MONID: 2123 case DDC_MONID:
1911 ddc_i2c = 2124 ddc_i2c =
1912 combios_setup_i2c_bus 2125 combios_setup_i2c_bus
1913 (RADEON_GPIO_MONID); 2126 (rdev, RADEON_GPIO_MONID);
1914 break; 2127 break;
1915 case DDC_DVI: 2128 case DDC_DVI:
1916 ddc_i2c = 2129 ddc_i2c =
1917 combios_setup_i2c_bus 2130 combios_setup_i2c_bus
1918 (RADEON_GPIO_DVI_DDC); 2131 (rdev, RADEON_GPIO_DVI_DDC);
1919 break; 2132 break;
1920 case DDC_VGA: 2133 case DDC_VGA:
1921 ddc_i2c = 2134 ddc_i2c =
1922 combios_setup_i2c_bus 2135 combios_setup_i2c_bus
1923 (RADEON_GPIO_VGA_DDC); 2136 (rdev, RADEON_GPIO_VGA_DDC);
1924 break; 2137 break;
1925 case DDC_CRT2: 2138 case DDC_CRT2:
1926 ddc_i2c = 2139 ddc_i2c =
1927 combios_setup_i2c_bus 2140 combios_setup_i2c_bus
1928 (RADEON_GPIO_CRT2_DDC); 2141 (rdev, RADEON_GPIO_CRT2_DDC);
1929 break; 2142 break;
1930 case DDC_LCD: 2143 case DDC_LCD:
1931 ddc_i2c = 2144 ddc_i2c =
1932 combios_setup_i2c_bus 2145 combios_setup_i2c_bus
1933 (RADEON_LCD_GPIO_MASK); 2146 (rdev, RADEON_GPIOPAD_MASK);
1934 ddc_i2c.mask_clk_mask = 2147 ddc_i2c.mask_clk_mask =
1935 RBIOS32(lcd_ddc_info + 3); 2148 RBIOS32(lcd_ddc_info + 3);
1936 ddc_i2c.mask_data_mask = 2149 ddc_i2c.mask_data_mask =
@@ -1939,19 +2152,19 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
1939 RBIOS32(lcd_ddc_info + 3); 2152 RBIOS32(lcd_ddc_info + 3);
1940 ddc_i2c.a_data_mask = 2153 ddc_i2c.a_data_mask =
1941 RBIOS32(lcd_ddc_info + 7); 2154 RBIOS32(lcd_ddc_info + 7);
1942 ddc_i2c.put_clk_mask = 2155 ddc_i2c.en_clk_mask =
1943 RBIOS32(lcd_ddc_info + 3); 2156 RBIOS32(lcd_ddc_info + 3);
1944 ddc_i2c.put_data_mask = 2157 ddc_i2c.en_data_mask =
1945 RBIOS32(lcd_ddc_info + 7); 2158 RBIOS32(lcd_ddc_info + 7);
1946 ddc_i2c.get_clk_mask = 2159 ddc_i2c.y_clk_mask =
1947 RBIOS32(lcd_ddc_info + 3); 2160 RBIOS32(lcd_ddc_info + 3);
1948 ddc_i2c.get_data_mask = 2161 ddc_i2c.y_data_mask =
1949 RBIOS32(lcd_ddc_info + 7); 2162 RBIOS32(lcd_ddc_info + 7);
1950 break; 2163 break;
1951 case DDC_GPIO: 2164 case DDC_GPIO:
1952 ddc_i2c = 2165 ddc_i2c =
1953 combios_setup_i2c_bus 2166 combios_setup_i2c_bus
1954 (RADEON_MDGPIO_EN_REG); 2167 (rdev, RADEON_MDGPIO_MASK);
1955 ddc_i2c.mask_clk_mask = 2168 ddc_i2c.mask_clk_mask =
1956 RBIOS32(lcd_ddc_info + 3); 2169 RBIOS32(lcd_ddc_info + 3);
1957 ddc_i2c.mask_data_mask = 2170 ddc_i2c.mask_data_mask =
@@ -1960,13 +2173,13 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
1960 RBIOS32(lcd_ddc_info + 3); 2173 RBIOS32(lcd_ddc_info + 3);
1961 ddc_i2c.a_data_mask = 2174 ddc_i2c.a_data_mask =
1962 RBIOS32(lcd_ddc_info + 7); 2175 RBIOS32(lcd_ddc_info + 7);
1963 ddc_i2c.put_clk_mask = 2176 ddc_i2c.en_clk_mask =
1964 RBIOS32(lcd_ddc_info + 3); 2177 RBIOS32(lcd_ddc_info + 3);
1965 ddc_i2c.put_data_mask = 2178 ddc_i2c.en_data_mask =
1966 RBIOS32(lcd_ddc_info + 7); 2179 RBIOS32(lcd_ddc_info + 7);
1967 ddc_i2c.get_clk_mask = 2180 ddc_i2c.y_clk_mask =
1968 RBIOS32(lcd_ddc_info + 3); 2181 RBIOS32(lcd_ddc_info + 3);
1969 ddc_i2c.get_data_mask = 2182 ddc_i2c.y_data_mask =
1970 RBIOS32(lcd_ddc_info + 7); 2183 RBIOS32(lcd_ddc_info + 7);
1971 break; 2184 break;
1972 default: 2185 default:
@@ -1977,12 +2190,14 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
1977 } else 2190 } else
1978 ddc_i2c.valid = false; 2191 ddc_i2c.valid = false;
1979 2192
2193 hpd.hpd = RADEON_HPD_NONE;
1980 radeon_add_legacy_connector(dev, 2194 radeon_add_legacy_connector(dev,
1981 5, 2195 5,
1982 ATOM_DEVICE_LCD1_SUPPORT, 2196 ATOM_DEVICE_LCD1_SUPPORT,
1983 DRM_MODE_CONNECTOR_LVDS, 2197 DRM_MODE_CONNECTOR_LVDS,
1984 &ddc_i2c, 2198 &ddc_i2c,
1985 CONNECTOR_OBJECT_ID_LVDS); 2199 CONNECTOR_OBJECT_ID_LVDS,
2200 &hpd);
1986 } 2201 }
1987 } 2202 }
1988 2203
@@ -1993,6 +2208,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
1993 if (tv_info) { 2208 if (tv_info) {
1994 if (RBIOS8(tv_info + 6) == 'T') { 2209 if (RBIOS8(tv_info + 6) == 'T') {
1995 if (radeon_apply_legacy_tv_quirks(dev)) { 2210 if (radeon_apply_legacy_tv_quirks(dev)) {
2211 hpd.hpd = RADEON_HPD_NONE;
1996 radeon_add_legacy_encoder(dev, 2212 radeon_add_legacy_encoder(dev,
1997 radeon_get_encoder_id 2213 radeon_get_encoder_id
1998 (dev, 2214 (dev,
@@ -2003,7 +2219,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
2003 ATOM_DEVICE_TV1_SUPPORT, 2219 ATOM_DEVICE_TV1_SUPPORT,
2004 DRM_MODE_CONNECTOR_SVIDEO, 2220 DRM_MODE_CONNECTOR_SVIDEO,
2005 &ddc_i2c, 2221 &ddc_i2c,
2006 CONNECTOR_OBJECT_ID_SVIDEO); 2222 CONNECTOR_OBJECT_ID_SVIDEO,
2223 &hpd);
2007 } 2224 }
2008 } 2225 }
2009 } 2226 }
@@ -2014,6 +2231,193 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
2014 return true; 2231 return true;
2015} 2232}
2016 2233
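All of the table walking in this file leans on the RBIOS8/RBIOS16/RBIOS32 accessors, which in radeon_combios.c are little-endian reads out of the cached BIOS image (rdev->bios is a byte buffer). Written out as standalone functions for clarity; the driver's macro forms are equivalent in effect:

#include <stdint.h>

static uint8_t rbios8(const uint8_t *bios, uint32_t off)
{
	return bios[off];
}

static uint16_t rbios16(const uint8_t *bios, uint32_t off)
{
	return rbios8(bios, off) | (rbios8(bios, off + 1) << 8);
}

static uint32_t rbios32(const uint8_t *bios, uint32_t off)
{
	return rbios16(bios, off) | ((uint32_t)rbios16(bios, off + 2) << 16);
}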
2234void radeon_external_tmds_setup(struct drm_encoder *encoder)
2235{
2236 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
2237 struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv;
2238
2239 if (!tmds)
2240 return;
2241
2242 switch (tmds->dvo_chip) {
2243 case DVO_SIL164:
2244 /* sil 164 */
2245 radeon_i2c_do_lock(tmds->i2c_bus, 1);
2246 radeon_i2c_sw_put_byte(tmds->i2c_bus,
2247 tmds->slave_addr,
2248 0x08, 0x30);
2249 radeon_i2c_sw_put_byte(tmds->i2c_bus,
2250 tmds->slave_addr,
2251 0x09, 0x00);
2252 radeon_i2c_sw_put_byte(tmds->i2c_bus,
2253 tmds->slave_addr,
2254 0x0a, 0x90);
2255 radeon_i2c_sw_put_byte(tmds->i2c_bus,
2256 tmds->slave_addr,
2257 0x0c, 0x89);
2258 radeon_i2c_sw_put_byte(tmds->i2c_bus,
2259 tmds->slave_addr,
2260 0x08, 0x3b);
2261 radeon_i2c_do_lock(tmds->i2c_bus, 0);
2262 break;
2263 case DVO_SIL1178:
2264 /* sil 1178 - untested */
2265 /*
2266 * 0x0f, 0x44
2267 * 0x0f, 0x4c
2268 * 0x0e, 0x01
2269 * 0x0a, 0x80
2270 * 0x09, 0x30
2271 * 0x0c, 0xc9
2272 * 0x0d, 0x70
2273 * 0x08, 0x32
2274 * 0x08, 0x33
2275 */
2276 break;
2277 default:
2278 break;
2279 }
2280
2281}
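The DVO_SIL164 branch above is five fixed (register, value) writes bracketed by a bus lock. The same sequence expressed as data, which would also make the untested SIL1178 list easy to slot in later; the two radeon_i2c_* calls are the diff's own, the rest is a sketch and the per-write comments are a reading of the magic values, not datasheet-verified:

static const struct { uint8_t reg, val; } sil164_init[] = {
	{ 0x08, 0x30 },	/* control write with the enable bit still clear */
	{ 0x09, 0x00 },
	{ 0x0a, 0x90 },
	{ 0x0c, 0x89 },
	{ 0x08, 0x3b },	/* re-write control with the enable bit set */
};

static void sil164_setup(struct radeon_i2c_chan *bus, uint8_t slave_addr)
{
	size_t i;

	radeon_i2c_do_lock(bus, 1);
	for (i = 0; i < ARRAY_SIZE(sil164_init); i++)
		radeon_i2c_sw_put_byte(bus, slave_addr,
				       sil164_init[i].reg,
				       sil164_init[i].val);
	radeon_i2c_do_lock(bus, 0);
}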
2282
2283bool radeon_combios_external_tmds_setup(struct drm_encoder *encoder)
2284{
2285 struct drm_device *dev = encoder->dev;
2286 struct radeon_device *rdev = dev->dev_private;
2287 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
2288 uint16_t offset;
2289 uint8_t blocks, slave_addr, rev;
2290 uint32_t index, id;
2291 uint32_t reg, val, and_mask, or_mask;
2292 struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv;
2293
2294 if (rdev->bios == NULL)
2295 return false;
2296
2297 if (!tmds)
2298 return false;
2299
2300 if (rdev->flags & RADEON_IS_IGP) {
2301 offset = combios_get_table_offset(dev, COMBIOS_TMDS_POWER_ON_TABLE);
 2301 offset = combios_get_table_offset(dev, COMBIOS_TMDS_POWER_ON_TABLE);
 2302 if (offset) {
 2303 /* read the revision byte only after validating the table offset */
 2304 rev = RBIOS8(offset);
2305 if (rev > 1) {
2306 blocks = RBIOS8(offset + 3);
2307 index = offset + 4;
2308 while (blocks > 0) {
2309 id = RBIOS16(index);
2310 index += 2;
2311 switch (id >> 13) {
2312 case 0:
2313 reg = (id & 0x1fff) * 4;
2314 val = RBIOS32(index);
2315 index += 4;
2316 WREG32(reg, val);
2317 break;
2318 case 2:
2319 reg = (id & 0x1fff) * 4;
2320 and_mask = RBIOS32(index);
2321 index += 4;
2322 or_mask = RBIOS32(index);
2323 index += 4;
2324 val = RREG32(reg);
2325 val = (val & and_mask) | or_mask;
2326 WREG32(reg, val);
2327 break;
2328 case 3:
2329 val = RBIOS16(index);
2330 index += 2;
2331 udelay(val);
2332 break;
2333 case 4:
2334 val = RBIOS16(index);
2335 index += 2;
2336 udelay(val * 1000);
2337 break;
2338 case 6:
2339 slave_addr = id & 0xff;
2340 slave_addr >>= 1; /* 7 bit addressing */
2341 index++;
2342 reg = RBIOS8(index);
2343 index++;
2344 val = RBIOS8(index);
2345 index++;
2346 radeon_i2c_do_lock(tmds->i2c_bus, 1);
2347 radeon_i2c_sw_put_byte(tmds->i2c_bus,
2348 slave_addr,
2349 reg, val);
2350 radeon_i2c_do_lock(tmds->i2c_bus, 0);
2351 break;
2352 default:
2353 DRM_ERROR("Unknown id %d\n", id >> 13);
2354 break;
2355 }
2356 blocks--;
2357 }
2358 return true;
2359 }
2360 }
2361 } else {
2362 offset = combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE);
2363 if (offset) {
2364 index = offset + 10;
2365 id = RBIOS16(index);
2366 while (id != 0xffff) {
2367 index += 2;
2368 switch (id >> 13) {
2369 case 0:
2370 reg = (id & 0x1fff) * 4;
 2371 val = RBIOS32(index);
 2372 index += 4; WREG32(reg, val); /* step past the 32-bit value */
2373 break;
2374 case 2:
2375 reg = (id & 0x1fff) * 4;
2376 and_mask = RBIOS32(index);
2377 index += 4;
2378 or_mask = RBIOS32(index);
2379 index += 4;
2380 val = RREG32(reg);
2381 val = (val & and_mask) | or_mask;
2382 WREG32(reg, val);
2383 break;
2384 case 4:
2385 val = RBIOS16(index);
2386 index += 2;
2387 udelay(val);
2388 break;
2389 case 5:
2390 reg = id & 0x1fff;
2391 and_mask = RBIOS32(index);
2392 index += 4;
2393 or_mask = RBIOS32(index);
2394 index += 4;
2395 val = RREG32_PLL(reg);
2396 val = (val & and_mask) | or_mask;
2397 WREG32_PLL(reg, val);
2398 break;
2399 case 6:
2400 reg = id & 0x1fff;
2401 val = RBIOS8(index);
2402 index += 1;
2403 radeon_i2c_do_lock(tmds->i2c_bus, 1);
2404 radeon_i2c_sw_put_byte(tmds->i2c_bus,
2405 tmds->slave_addr,
2406 reg, val);
2407 radeon_i2c_do_lock(tmds->i2c_bus, 0);
2408 break;
2409 default:
2410 DRM_ERROR("Unknown id %d\n", id >> 13);
2411 break;
2412 }
2413 id = RBIOS16(index);
2414 }
2415 return true;
2416 }
2417 }
2418 return false;
2419}
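Both branches of radeon_combios_external_tmds_setup interpret the same style of scripted table: the top three bits of each 16-bit id select an action and the low 13 bits carry a register index where one is needed. The dispatch as it appears in the two interpreters above, collected for reference; the enum names are descriptive, not the driver's:

enum combios_script_op {
	OP_MMIO_WRITE = 0,	/* 32-bit value follows; WREG32 */
	OP_MMIO_RMW   = 2,	/* and-mask then or-mask follow; RREG32/WREG32 */
	OP_DELAY_A    = 3,	/* udelay(arg); seen in the IGP table only */
	OP_DELAY_B    = 4,	/* udelay(arg * 1000) in the IGP interpreter,
				 * plain udelay(arg) in the EXT_TMDS one */
	OP_PLL_RMW    = 5,	/* RREG32_PLL/WREG32_PLL; EXT_TMDS table only */
	OP_I2C_WRITE  = 6,	/* one-byte register write; the IGP form packs
				 * the slave address into the id, the EXT_TMDS
				 * form reuses tmds->slave_addr */
};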
2420
2017static void combios_parse_mmio_table(struct drm_device *dev, uint16_t offset) 2421static void combios_parse_mmio_table(struct drm_device *dev, uint16_t offset)
2018{ 2422{
2019 struct radeon_device *rdev = dev->dev_private; 2423 struct radeon_device *rdev = dev->dev_private;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 29763ceae3af..5eece186e03c 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -40,6 +40,26 @@ radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
40 struct drm_encoder *encoder, 40 struct drm_encoder *encoder,
41 bool connected); 41 bool connected);
42 42
43void radeon_connector_hotplug(struct drm_connector *connector)
44{
45 struct drm_device *dev = connector->dev;
46 struct radeon_device *rdev = dev->dev_private;
47 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
48
49 if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
50 radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
51
52 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
53 if (radeon_dp_getsinktype(radeon_connector) == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
54 if (radeon_dp_needs_link_train(radeon_connector)) {
55 if (connector->encoder)
56 dp_link_train(connector->encoder, connector);
57 }
58 }
59 }
60
61}
62
43static void radeon_property_change_mode(struct drm_encoder *encoder) 63static void radeon_property_change_mode(struct drm_encoder *encoder)
44{ 64{
45 struct drm_crtc *crtc = encoder->crtc; 65 struct drm_crtc *crtc = encoder->crtc;
@@ -445,10 +465,10 @@ static enum drm_connector_status radeon_lvds_detect(struct drm_connector *connec
445 ret = connector_status_connected; 465 ret = connector_status_connected;
446 else { 466 else {
447 if (radeon_connector->ddc_bus) { 467 if (radeon_connector->ddc_bus) {
448 radeon_i2c_do_lock(radeon_connector, 1); 468 radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
449 radeon_connector->edid = drm_get_edid(&radeon_connector->base, 469 radeon_connector->edid = drm_get_edid(&radeon_connector->base,
450 &radeon_connector->ddc_bus->adapter); 470 &radeon_connector->ddc_bus->adapter);
451 radeon_i2c_do_lock(radeon_connector, 0); 471 radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
452 if (radeon_connector->edid) 472 if (radeon_connector->edid)
453 ret = connector_status_connected; 473 ret = connector_status_connected;
454 } 474 }
@@ -553,17 +573,17 @@ static enum drm_connector_status radeon_vga_detect(struct drm_connector *connect
553 if (!encoder) 573 if (!encoder)
554 ret = connector_status_disconnected; 574 ret = connector_status_disconnected;
555 575
556 radeon_i2c_do_lock(radeon_connector, 1); 576 radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
557 dret = radeon_ddc_probe(radeon_connector); 577 dret = radeon_ddc_probe(radeon_connector);
558 radeon_i2c_do_lock(radeon_connector, 0); 578 radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
559 if (dret) { 579 if (dret) {
560 if (radeon_connector->edid) { 580 if (radeon_connector->edid) {
561 kfree(radeon_connector->edid); 581 kfree(radeon_connector->edid);
562 radeon_connector->edid = NULL; 582 radeon_connector->edid = NULL;
563 } 583 }
564 radeon_i2c_do_lock(radeon_connector, 1); 584 radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
565 radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); 585 radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
566 radeon_i2c_do_lock(radeon_connector, 0); 586 radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
567 587
568 if (!radeon_connector->edid) { 588 if (!radeon_connector->edid) {
569 DRM_ERROR("%s: probed a monitor but no|invalid EDID\n", 589 DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
@@ -708,17 +728,17 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
708 enum drm_connector_status ret = connector_status_disconnected; 728 enum drm_connector_status ret = connector_status_disconnected;
709 bool dret; 729 bool dret;
710 730
711 radeon_i2c_do_lock(radeon_connector, 1); 731 radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
712 dret = radeon_ddc_probe(radeon_connector); 732 dret = radeon_ddc_probe(radeon_connector);
713 radeon_i2c_do_lock(radeon_connector, 0); 733 radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
714 if (dret) { 734 if (dret) {
715 if (radeon_connector->edid) { 735 if (radeon_connector->edid) {
716 kfree(radeon_connector->edid); 736 kfree(radeon_connector->edid);
717 radeon_connector->edid = NULL; 737 radeon_connector->edid = NULL;
718 } 738 }
719 radeon_i2c_do_lock(radeon_connector, 1); 739 radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
720 radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); 740 radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
721 radeon_i2c_do_lock(radeon_connector, 0); 741 radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
722 742
723 if (!radeon_connector->edid) { 743 if (!radeon_connector->edid) {
724 DRM_ERROR("%s: probed a monitor but no|invalid EDID\n", 744 DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
@@ -735,6 +755,39 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
735 ret = connector_status_disconnected; 755 ret = connector_status_disconnected;
736 } else 756 } else
737 ret = connector_status_connected; 757 ret = connector_status_connected;
758
 759 /* Multiple connectors on the same encoder can share one ddc line.
 760 * This tends to be HDMI and DVI on the same encoder with the
761 * same ddc line. If the edid says HDMI, consider the HDMI port
762 * connected and the DVI port disconnected. If the edid doesn't
763 * say HDMI, vice versa.
764 */
 765 if (radeon_connector->shared_ddc && (ret == connector_status_connected)) {
766 struct drm_device *dev = connector->dev;
767 struct drm_connector *list_connector;
768 struct radeon_connector *list_radeon_connector;
769 list_for_each_entry(list_connector, &dev->mode_config.connector_list, head) {
770 if (connector == list_connector)
771 continue;
772 list_radeon_connector = to_radeon_connector(list_connector);
773 if (radeon_connector->devices == list_radeon_connector->devices) {
774 if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
775 if (connector->connector_type == DRM_MODE_CONNECTOR_DVID) {
776 kfree(radeon_connector->edid);
777 radeon_connector->edid = NULL;
778 ret = connector_status_disconnected;
779 }
780 } else {
781 if ((connector->connector_type == DRM_MODE_CONNECTOR_HDMIA) ||
782 (connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)) {
783 kfree(radeon_connector->edid);
784 radeon_connector->edid = NULL;
785 ret = connector_status_disconnected;
786 }
787 }
788 }
789 }
790 }
738 } 791 }
739 } 792 }
740 793
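The shared_ddc block in radeon_dvi_detect reduces to one rule: when two connectors share an encoder and ddc line, the EDID's HDMI capability decides which port gets reported connected. Distilled below; drm_detect_hdmi_monitor() is the real DRM helper, the wrapper is illustrative:

static bool report_connected(int connector_type, struct edid *edid)
{
	bool hdmi_sink = drm_detect_hdmi_monitor(edid);

	if (connector_type == DRM_MODE_CONNECTOR_DVID)
		return !hdmi_sink;	/* HDMI sink: the DVI twin loses */
	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
	    connector_type == DRM_MODE_CONNECTOR_HDMIB)
		return hdmi_sink;	/* non-HDMI sink: the HDMI twin loses */
	return true;
}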
@@ -863,6 +916,91 @@ struct drm_connector_funcs radeon_dvi_connector_funcs = {
863 .force = radeon_dvi_force, 916 .force = radeon_dvi_force,
864}; 917};
865 918
919static void radeon_dp_connector_destroy(struct drm_connector *connector)
920{
921 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
922 struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
923
924 if (radeon_connector->ddc_bus)
925 radeon_i2c_destroy(radeon_connector->ddc_bus);
926 if (radeon_connector->edid)
927 kfree(radeon_connector->edid);
928 if (radeon_dig_connector->dp_i2c_bus)
929 radeon_i2c_destroy(radeon_dig_connector->dp_i2c_bus);
930 kfree(radeon_connector->con_priv);
931 drm_sysfs_connector_remove(connector);
932 drm_connector_cleanup(connector);
933 kfree(connector);
934}
935
936static int radeon_dp_get_modes(struct drm_connector *connector)
937{
938 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
939 int ret;
940
941 ret = radeon_ddc_get_modes(radeon_connector);
942 return ret;
943}
944
945static enum drm_connector_status radeon_dp_detect(struct drm_connector *connector)
946{
947 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
948 enum drm_connector_status ret = connector_status_disconnected;
949 struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
950 u8 sink_type;
951
952 if (radeon_connector->edid) {
953 kfree(radeon_connector->edid);
954 radeon_connector->edid = NULL;
955 }
956
957 sink_type = radeon_dp_getsinktype(radeon_connector);
958 if (sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
959 if (radeon_dp_getdpcd(radeon_connector)) {
960 radeon_dig_connector->dp_sink_type = sink_type;
961 ret = connector_status_connected;
962 }
963 } else {
964 radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
965 if (radeon_ddc_probe(radeon_connector)) {
966 radeon_dig_connector->dp_sink_type = sink_type;
967 ret = connector_status_connected;
968 }
969 radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
970 }
971
972 return ret;
973}
974
975static int radeon_dp_mode_valid(struct drm_connector *connector,
976 struct drm_display_mode *mode)
977{
978 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
979 struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
980
981 /* XXX check mode bandwidth */
982
983 if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT)
984 return radeon_dp_mode_valid_helper(radeon_connector, mode);
985 else
986 return MODE_OK;
987}
988
989struct drm_connector_helper_funcs radeon_dp_connector_helper_funcs = {
990 .get_modes = radeon_dp_get_modes,
991 .mode_valid = radeon_dp_mode_valid,
992 .best_encoder = radeon_dvi_encoder,
993};
994
995struct drm_connector_funcs radeon_dp_connector_funcs = {
996 .dpms = drm_helper_connector_dpms,
997 .detect = radeon_dp_detect,
998 .fill_modes = drm_helper_probe_single_connector_modes,
999 .set_property = radeon_connector_set_property,
1000 .destroy = radeon_dp_connector_destroy,
1001 .force = radeon_dvi_force,
1002};
1003
866void 1004void
867radeon_add_atom_connector(struct drm_device *dev, 1005radeon_add_atom_connector(struct drm_device *dev,
868 uint32_t connector_id, 1006 uint32_t connector_id,
@@ -871,7 +1009,8 @@ radeon_add_atom_connector(struct drm_device *dev,
871 struct radeon_i2c_bus_rec *i2c_bus, 1009 struct radeon_i2c_bus_rec *i2c_bus,
872 bool linkb, 1010 bool linkb,
873 uint32_t igp_lane_info, 1011 uint32_t igp_lane_info,
874 uint16_t connector_object_id) 1012 uint16_t connector_object_id,
1013 struct radeon_hpd *hpd)
875{ 1014{
876 struct radeon_device *rdev = dev->dev_private; 1015 struct radeon_device *rdev = dev->dev_private;
877 struct drm_connector *connector; 1016 struct drm_connector *connector;
@@ -911,6 +1050,7 @@ radeon_add_atom_connector(struct drm_device *dev,
911 radeon_connector->devices = supported_device; 1050 radeon_connector->devices = supported_device;
912 radeon_connector->shared_ddc = shared_ddc; 1051 radeon_connector->shared_ddc = shared_ddc;
913 radeon_connector->connector_object_id = connector_object_id; 1052 radeon_connector->connector_object_id = connector_object_id;
1053 radeon_connector->hpd = *hpd;
914 switch (connector_type) { 1054 switch (connector_type) {
915 case DRM_MODE_CONNECTOR_VGA: 1055 case DRM_MODE_CONNECTOR_VGA:
916 drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); 1056 drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
@@ -963,10 +1103,12 @@ radeon_add_atom_connector(struct drm_device *dev,
963 drm_connector_attach_property(&radeon_connector->base, 1103 drm_connector_attach_property(&radeon_connector->base,
964 rdev->mode_info.coherent_mode_property, 1104 rdev->mode_info.coherent_mode_property,
965 1); 1105 1);
966 radeon_connector->dac_load_detect = true; 1106 if (connector_type == DRM_MODE_CONNECTOR_DVII) {
967 drm_connector_attach_property(&radeon_connector->base, 1107 radeon_connector->dac_load_detect = true;
968 rdev->mode_info.load_detect_property, 1108 drm_connector_attach_property(&radeon_connector->base,
969 1); 1109 rdev->mode_info.load_detect_property,
1110 1);
1111 }
970 break; 1112 break;
971 case DRM_MODE_CONNECTOR_HDMIA: 1113 case DRM_MODE_CONNECTOR_HDMIA:
972 case DRM_MODE_CONNECTOR_HDMIB: 1114 case DRM_MODE_CONNECTOR_HDMIB:
@@ -997,16 +1139,23 @@ radeon_add_atom_connector(struct drm_device *dev,
997 radeon_dig_connector->linkb = linkb; 1139 radeon_dig_connector->linkb = linkb;
998 radeon_dig_connector->igp_lane_info = igp_lane_info; 1140 radeon_dig_connector->igp_lane_info = igp_lane_info;
999 radeon_connector->con_priv = radeon_dig_connector; 1141 radeon_connector->con_priv = radeon_dig_connector;
1000 drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); 1142 drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
1001 ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs); 1143 ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
1002 if (ret) 1144 if (ret)
1003 goto failed; 1145 goto failed;
1004 if (i2c_bus->valid) { 1146 if (i2c_bus->valid) {
1147 /* add DP i2c bus */
1148 radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch");
1149 if (!radeon_dig_connector->dp_i2c_bus)
1150 goto failed;
1005 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DP"); 1151 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DP");
1006 if (!radeon_connector->ddc_bus) 1152 if (!radeon_connector->ddc_bus)
1007 goto failed; 1153 goto failed;
1008 } 1154 }
1009 subpixel_order = SubPixelHorizontalRGB; 1155 subpixel_order = SubPixelHorizontalRGB;
1156 drm_connector_attach_property(&radeon_connector->base,
1157 rdev->mode_info.coherent_mode_property,
1158 1);
1010 break; 1159 break;
1011 case DRM_MODE_CONNECTOR_SVIDEO: 1160 case DRM_MODE_CONNECTOR_SVIDEO:
1012 case DRM_MODE_CONNECTOR_Composite: 1161 case DRM_MODE_CONNECTOR_Composite:
@@ -1020,6 +1169,9 @@ radeon_add_atom_connector(struct drm_device *dev,
1020 drm_connector_attach_property(&radeon_connector->base, 1169 drm_connector_attach_property(&radeon_connector->base,
1021 rdev->mode_info.load_detect_property, 1170 rdev->mode_info.load_detect_property,
1022 1); 1171 1);
1172 drm_connector_attach_property(&radeon_connector->base,
1173 rdev->mode_info.tv_std_property,
1174 1);
1023 } 1175 }
1024 break; 1176 break;
1025 case DRM_MODE_CONNECTOR_LVDS: 1177 case DRM_MODE_CONNECTOR_LVDS:
@@ -1038,7 +1190,6 @@ radeon_add_atom_connector(struct drm_device *dev,
1038 if (!radeon_connector->ddc_bus) 1190 if (!radeon_connector->ddc_bus)
1039 goto failed; 1191 goto failed;
1040 } 1192 }
1041 drm_mode_create_scaling_mode_property(dev);
1042 drm_connector_attach_property(&radeon_connector->base, 1193 drm_connector_attach_property(&radeon_connector->base,
1043 dev->mode_config.scaling_mode_property, 1194 dev->mode_config.scaling_mode_property,
1044 DRM_MODE_SCALE_FULLSCREEN); 1195 DRM_MODE_SCALE_FULLSCREEN);
@@ -1063,7 +1214,8 @@ radeon_add_legacy_connector(struct drm_device *dev,
1063 uint32_t supported_device, 1214 uint32_t supported_device,
1064 int connector_type, 1215 int connector_type,
1065 struct radeon_i2c_bus_rec *i2c_bus, 1216 struct radeon_i2c_bus_rec *i2c_bus,
1066 uint16_t connector_object_id) 1217 uint16_t connector_object_id,
1218 struct radeon_hpd *hpd)
1067{ 1219{
1068 struct radeon_device *rdev = dev->dev_private; 1220 struct radeon_device *rdev = dev->dev_private;
1069 struct drm_connector *connector; 1221 struct drm_connector *connector;
@@ -1093,6 +1245,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
1093 radeon_connector->connector_id = connector_id; 1245 radeon_connector->connector_id = connector_id;
1094 radeon_connector->devices = supported_device; 1246 radeon_connector->devices = supported_device;
1095 radeon_connector->connector_object_id = connector_object_id; 1247 radeon_connector->connector_object_id = connector_object_id;
1248 radeon_connector->hpd = *hpd;
1096 switch (connector_type) { 1249 switch (connector_type) {
1097 case DRM_MODE_CONNECTOR_VGA: 1250 case DRM_MODE_CONNECTOR_VGA:
1098 drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); 1251 drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
@@ -1160,6 +1313,9 @@ radeon_add_legacy_connector(struct drm_device *dev,
1160 drm_connector_attach_property(&radeon_connector->base, 1313 drm_connector_attach_property(&radeon_connector->base,
1161 rdev->mode_info.load_detect_property, 1314 rdev->mode_info.load_detect_property,
1162 1); 1315 1);
1316 drm_connector_attach_property(&radeon_connector->base,
1317 rdev->mode_info.tv_std_property,
1318 1);
1163 } 1319 }
1164 break; 1320 break;
1165 case DRM_MODE_CONNECTOR_LVDS: 1321 case DRM_MODE_CONNECTOR_LVDS:
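The shared-ddc handling in radeon_dvi_detect() above is a simple tie-break: when two connectors report the same devices mask and sit on one ddc line, the EDID's HDMI signature decides which of the pair keeps its connected status. A stand-alone sketch of that decision, with simplified illustrative types rather than the driver's real structures:

    /* Minimal sketch of the shared-DDC tie-break; names and types here
     * are illustrative, not the kernel's. edid_is_hdmi stands in for
     * drm_detect_hdmi_monitor(). */
    #include <stdbool.h>

    enum status { DISCONNECTED, CONNECTED };

    struct port {
            bool is_hdmi;           /* HDMI-A/B connector vs DVI-D */
            enum status status;
    };

    static void shared_ddc_tie_break(struct port *a, struct port *b,
                                     bool edid_is_hdmi)
    {
            /* the port whose type matches the EDID keeps "connected" */
            a->status = (a->is_hdmi == edid_is_hdmi) ? CONNECTED : DISCONNECTED;
            b->status = (b->is_hdmi == edid_is_hdmi) ? CONNECTED : DISCONNECTED;
    }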
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index 4f7afc79dd82..0b2f9c2ad2c1 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -1941,8 +1941,8 @@ struct drm_buf *radeon_freelist_get(struct drm_device * dev)
1941 for (t = 0; t < dev_priv->usec_timeout; t++) { 1941 for (t = 0; t < dev_priv->usec_timeout; t++) {
1942 u32 done_age = GET_SCRATCH(dev_priv, 1); 1942 u32 done_age = GET_SCRATCH(dev_priv, 1);
1943 DRM_DEBUG("done_age = %d\n", done_age); 1943 DRM_DEBUG("done_age = %d\n", done_age);
1944 for (i = start; i < dma->buf_count; i++) { 1944 for (i = 0; i < dma->buf_count; i++) {
1945 buf = dma->buflist[i]; 1945 buf = dma->buflist[start];
1946 buf_priv = buf->dev_private; 1946 buf_priv = buf->dev_private;
1947 if (buf->file_priv == NULL || (buf->pending && 1947 if (buf->file_priv == NULL || (buf->pending &&
1948 buf_priv->age <= 1948 buf_priv->age <=
@@ -1951,7 +1951,8 @@ struct drm_buf *radeon_freelist_get(struct drm_device * dev)
1951 buf->pending = 0; 1951 buf->pending = 0;
1952 return buf; 1952 return buf;
1953 } 1953 }
1954 start = 0; 1954 if (++start >= dma->buf_count)
1955 start = 0;
1955 } 1956 }
1956 1957
1957 if (t) { 1958 if (t) {
@@ -1960,47 +1961,9 @@ struct drm_buf *radeon_freelist_get(struct drm_device * dev)
1960 } 1961 }
1961 } 1962 }
1962 1963
1963 DRM_DEBUG("returning NULL!\n");
1964 return NULL; 1964 return NULL;
1965} 1965}
1966 1966
1967#if 0
1968struct drm_buf *radeon_freelist_get(struct drm_device * dev)
1969{
1970 struct drm_device_dma *dma = dev->dma;
1971 drm_radeon_private_t *dev_priv = dev->dev_private;
1972 drm_radeon_buf_priv_t *buf_priv;
1973 struct drm_buf *buf;
1974 int i, t;
1975 int start;
1976 u32 done_age;
1977
1978 done_age = radeon_read_ring_rptr(dev_priv, RADEON_SCRATCHOFF(1));
1979 if (++dev_priv->last_buf >= dma->buf_count)
1980 dev_priv->last_buf = 0;
1981
1982 start = dev_priv->last_buf;
1983 dev_priv->stats.freelist_loops++;
1984
1985 for (t = 0; t < 2; t++) {
1986 for (i = start; i < dma->buf_count; i++) {
1987 buf = dma->buflist[i];
1988 buf_priv = buf->dev_private;
1989 if (buf->file_priv == 0 || (buf->pending &&
1990 buf_priv->age <=
1991 done_age)) {
1992 dev_priv->stats.requested_bufs++;
1993 buf->pending = 0;
1994 return buf;
1995 }
1996 }
1997 start = 0;
1998 }
1999
2000 return NULL;
2001}
2002#endif
2003
2004void radeon_freelist_reset(struct drm_device * dev) 1967void radeon_freelist_reset(struct drm_device * dev)
2005{ 1968{
2006 struct drm_device_dma *dma = dev->dma; 1969 struct drm_device_dma *dma = dev->dma;
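The radeon_freelist_get() change above turns a scan that, on each retry, only covered indices start..buf_count-1 into a full wrap-around pass over every buffer. The pattern in isolation, assuming a toy busy[] array (illustrative code, not the driver's):

    #include <stddef.h>

    /* Visit all n slots beginning at *start, wrapping at the end, and
     * return the first free index. The cursor advances past every slot
     * examined, so successive calls rotate instead of always rescanning
     * from slot 0. Returns -1 if nothing is free after one full pass. */
    static int find_free_wraparound(const int *busy, size_t n, size_t *start)
    {
            size_t i;

            for (i = 0; i < n; i++) {
                    size_t idx = *start;

                    if (++*start >= n)
                            *start = 0;
                    if (!busy[idx])
                            return (int)idx;
            }
            return -1;
    }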
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 5ab2cf96a264..65590a0f1d93 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -76,17 +76,17 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
76 } 76 }
77 p->relocs_ptr[i] = &p->relocs[i]; 77 p->relocs_ptr[i] = &p->relocs[i];
78 p->relocs[i].robj = p->relocs[i].gobj->driver_private; 78 p->relocs[i].robj = p->relocs[i].gobj->driver_private;
79 p->relocs[i].lobj.robj = p->relocs[i].robj; 79 p->relocs[i].lobj.bo = p->relocs[i].robj;
80 p->relocs[i].lobj.rdomain = r->read_domains; 80 p->relocs[i].lobj.rdomain = r->read_domains;
81 p->relocs[i].lobj.wdomain = r->write_domain; 81 p->relocs[i].lobj.wdomain = r->write_domain;
82 p->relocs[i].handle = r->handle; 82 p->relocs[i].handle = r->handle;
83 p->relocs[i].flags = r->flags; 83 p->relocs[i].flags = r->flags;
84 INIT_LIST_HEAD(&p->relocs[i].lobj.list); 84 INIT_LIST_HEAD(&p->relocs[i].lobj.list);
85 radeon_object_list_add_object(&p->relocs[i].lobj, 85 radeon_bo_list_add_object(&p->relocs[i].lobj,
86 &p->validated); 86 &p->validated);
87 } 87 }
88 } 88 }
89 return radeon_object_list_validate(&p->validated, p->ib->fence); 89 return radeon_bo_list_validate(&p->validated, p->ib->fence);
90} 90}
91 91
92int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) 92int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
@@ -190,9 +190,10 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
190 unsigned i; 190 unsigned i;
191 191
192 if (error) { 192 if (error) {
193 radeon_object_list_unvalidate(&parser->validated); 193 radeon_bo_list_unvalidate(&parser->validated,
194 parser->ib->fence);
194 } else { 195 } else {
195 radeon_object_list_clean(&parser->validated); 196 radeon_bo_list_unreserve(&parser->validated);
196 } 197 }
197 for (i = 0; i < parser->nrelocs; i++) { 198 for (i = 0; i < parser->nrelocs; i++) {
198 if (parser->relocs[i].gobj) { 199 if (parser->relocs[i].gobj) {
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 41bb76fbe734..02bcdb1240c0 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -44,10 +44,11 @@ void radeon_surface_init(struct radeon_device *rdev)
44 if (rdev->family < CHIP_R600) { 44 if (rdev->family < CHIP_R600) {
45 int i; 45 int i;
46 46
47 for (i = 0; i < 8; i++) { 47 for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
48 WREG32(RADEON_SURFACE0_INFO + 48 if (rdev->surface_regs[i].bo)
49 i * (RADEON_SURFACE1_INFO - RADEON_SURFACE0_INFO), 49 radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
50 0); 50 else
51 radeon_clear_surface_reg(rdev, i);
51 } 52 }
52 /* enable surfaces */ 53 /* enable surfaces */
53 WREG32(RADEON_SURFACE_CNTL, 0); 54 WREG32(RADEON_SURFACE_CNTL, 0);
@@ -208,6 +209,24 @@ bool radeon_card_posted(struct radeon_device *rdev)
208 209
209} 210}
210 211
212bool radeon_boot_test_post_card(struct radeon_device *rdev)
213{
214 if (radeon_card_posted(rdev))
215 return true;
216
217 if (rdev->bios) {
218 DRM_INFO("GPU not posted. posting now...\n");
219 if (rdev->is_atom_bios)
220 atom_asic_init(rdev->mode_info.atom_context);
221 else
222 radeon_combios_asic_init(rdev->ddev);
223 return true;
224 } else {
225 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
226 return false;
227 }
228}
229
211int radeon_dummy_page_init(struct radeon_device *rdev) 230int radeon_dummy_page_init(struct radeon_device *rdev)
212{ 231{
213 rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO); 232 rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
@@ -463,12 +482,16 @@ int radeon_atombios_init(struct radeon_device *rdev)
463 482
464 rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios); 483 rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
465 radeon_atom_initialize_bios_scratch_regs(rdev->ddev); 484 radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
485 atom_allocate_fb_scratch(rdev->mode_info.atom_context);
466 return 0; 486 return 0;
467} 487}
468 488
469void radeon_atombios_fini(struct radeon_device *rdev) 489void radeon_atombios_fini(struct radeon_device *rdev)
470{ 490{
471 kfree(rdev->mode_info.atom_context); 491 if (rdev->mode_info.atom_context) {
492 kfree(rdev->mode_info.atom_context->scratch);
493 kfree(rdev->mode_info.atom_context);
494 }
472 kfree(rdev->mode_info.atom_card_info); 495 kfree(rdev->mode_info.atom_card_info);
473} 496}
474 497
@@ -544,16 +567,24 @@ int radeon_device_init(struct radeon_device *rdev,
544 mutex_init(&rdev->cs_mutex); 567 mutex_init(&rdev->cs_mutex);
545 mutex_init(&rdev->ib_pool.mutex); 568 mutex_init(&rdev->ib_pool.mutex);
546 mutex_init(&rdev->cp.mutex); 569 mutex_init(&rdev->cp.mutex);
570 if (rdev->family >= CHIP_R600)
571 spin_lock_init(&rdev->ih.lock);
572 mutex_init(&rdev->gem.mutex);
547 rwlock_init(&rdev->fence_drv.lock); 573 rwlock_init(&rdev->fence_drv.lock);
548 INIT_LIST_HEAD(&rdev->gem.objects); 574 INIT_LIST_HEAD(&rdev->gem.objects);
549 575
576 /* setup workqueue */
577 rdev->wq = create_workqueue("radeon");
578 if (rdev->wq == NULL)
579 return -ENOMEM;
580
550 /* Set asic functions */ 581 /* Set asic functions */
551 r = radeon_asic_init(rdev); 582 r = radeon_asic_init(rdev);
552 if (r) { 583 if (r) {
553 return r; 584 return r;
554 } 585 }
555 586
556 if (radeon_agpmode == -1) { 587 if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
557 radeon_agp_disable(rdev); 588 radeon_agp_disable(rdev);
558 } 589 }
559 590
@@ -620,6 +651,7 @@ void radeon_device_fini(struct radeon_device *rdev)
620 DRM_INFO("radeon: finishing device.\n"); 651 DRM_INFO("radeon: finishing device.\n");
621 rdev->shutdown = true; 652 rdev->shutdown = true;
622 radeon_fini(rdev); 653 radeon_fini(rdev);
654 destroy_workqueue(rdev->wq);
623 vga_client_register(rdev->pdev, NULL, NULL, NULL); 655 vga_client_register(rdev->pdev, NULL, NULL, NULL);
624 iounmap(rdev->rmmio); 656 iounmap(rdev->rmmio);
625 rdev->rmmio = NULL; 657 rdev->rmmio = NULL;
@@ -633,6 +665,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
633{ 665{
634 struct radeon_device *rdev = dev->dev_private; 666 struct radeon_device *rdev = dev->dev_private;
635 struct drm_crtc *crtc; 667 struct drm_crtc *crtc;
668 int r;
636 669
637 if (dev == NULL || rdev == NULL) { 670 if (dev == NULL || rdev == NULL) {
638 return -ENODEV; 671 return -ENODEV;
@@ -643,26 +676,31 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
643 /* unpin the front buffers */ 676 /* unpin the front buffers */
644 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 677 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
645 struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb); 678 struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
646 struct radeon_object *robj; 679 struct radeon_bo *robj;
647 680
648 if (rfb == NULL || rfb->obj == NULL) { 681 if (rfb == NULL || rfb->obj == NULL) {
649 continue; 682 continue;
650 } 683 }
651 robj = rfb->obj->driver_private; 684 robj = rfb->obj->driver_private;
652 if (robj != rdev->fbdev_robj) { 685 if (robj != rdev->fbdev_rbo) {
653 radeon_object_unpin(robj); 686 r = radeon_bo_reserve(robj, false);
687 if (likely(r == 0)) {
688 radeon_bo_unpin(robj);
689 radeon_bo_unreserve(robj);
690 }
654 } 691 }
655 } 692 }
656 /* evict vram memory */ 693 /* evict vram memory */
657 radeon_object_evict_vram(rdev); 694 radeon_bo_evict_vram(rdev);
658 /* wait for gpu to finish processing current batch */ 695 /* wait for gpu to finish processing current batch */
659 radeon_fence_wait_last(rdev); 696 radeon_fence_wait_last(rdev);
660 697
661 radeon_save_bios_scratch_regs(rdev); 698 radeon_save_bios_scratch_regs(rdev);
662 699
663 radeon_suspend(rdev); 700 radeon_suspend(rdev);
701 radeon_hpd_fini(rdev);
664 /* evict remaining vram memory */ 702 /* evict remaining vram memory */
665 radeon_object_evict_vram(rdev); 703 radeon_bo_evict_vram(rdev);
666 704
667 pci_save_state(dev->pdev); 705 pci_save_state(dev->pdev);
668 if (state.event == PM_EVENT_SUSPEND) { 706 if (state.event == PM_EVENT_SUSPEND) {
@@ -695,6 +733,8 @@ int radeon_resume_kms(struct drm_device *dev)
695 fb_set_suspend(rdev->fbdev_info, 0); 733 fb_set_suspend(rdev->fbdev_info, 0);
696 release_console_sem(); 734 release_console_sem();
697 735
736 /* reset hpd state */
737 radeon_hpd_init(rdev);
698 /* blat the mode back in */ 738 /* blat the mode back in */
699 drm_helper_resume_force_mode(dev); 739 drm_helper_resume_force_mode(dev);
700 return 0; 740 return 0;
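The suspend path above also shows the locking convention behind the radeon_object to radeon_bo rename: pin, unpin and kmap operations are only valid while the buffer is reserved. The bracket in its minimal form, using the same calls that appear in this diff:

    /* Sketch of the reserve/operate/unreserve bracket; error handling
     * trimmed to the essentials. */
    static void unpin_one(struct radeon_bo *robj)
    {
            int r;

            r = radeon_bo_reserve(robj, false);     /* lock the BO; can fail */
            if (r != 0)
                    return;                         /* skip what we cannot lock */
            radeon_bo_unpin(robj);                  /* valid only while reserved */
            radeon_bo_unreserve(robj);              /* drop the BO lock */
    }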
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index c85df4afcb7a..a133b833e45d 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -250,6 +250,16 @@ static const char *connector_names[13] = {
250 "HDMI-B", 250 "HDMI-B",
251}; 251};
252 252
253static const char *hpd_names[7] = {
254 "NONE",
255 "HPD1",
256 "HPD2",
257 "HPD3",
258 "HPD4",
259 "HPD5",
260 "HPD6",
261};
262
253static void radeon_print_display_setup(struct drm_device *dev) 263static void radeon_print_display_setup(struct drm_device *dev)
254{ 264{
255 struct drm_connector *connector; 265 struct drm_connector *connector;
@@ -264,16 +274,18 @@ static void radeon_print_display_setup(struct drm_device *dev)
264 radeon_connector = to_radeon_connector(connector); 274 radeon_connector = to_radeon_connector(connector);
265 DRM_INFO("Connector %d:\n", i); 275 DRM_INFO("Connector %d:\n", i);
266 DRM_INFO(" %s\n", connector_names[connector->connector_type]); 276 DRM_INFO(" %s\n", connector_names[connector->connector_type]);
277 if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
278 DRM_INFO(" %s\n", hpd_names[radeon_connector->hpd.hpd]);
267 if (radeon_connector->ddc_bus) 279 if (radeon_connector->ddc_bus)
268 DRM_INFO(" DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", 280 DRM_INFO(" DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
269 radeon_connector->ddc_bus->rec.mask_clk_reg, 281 radeon_connector->ddc_bus->rec.mask_clk_reg,
270 radeon_connector->ddc_bus->rec.mask_data_reg, 282 radeon_connector->ddc_bus->rec.mask_data_reg,
271 radeon_connector->ddc_bus->rec.a_clk_reg, 283 radeon_connector->ddc_bus->rec.a_clk_reg,
272 radeon_connector->ddc_bus->rec.a_data_reg, 284 radeon_connector->ddc_bus->rec.a_data_reg,
273 radeon_connector->ddc_bus->rec.put_clk_reg, 285 radeon_connector->ddc_bus->rec.en_clk_reg,
274 radeon_connector->ddc_bus->rec.put_data_reg, 286 radeon_connector->ddc_bus->rec.en_data_reg,
275 radeon_connector->ddc_bus->rec.get_clk_reg, 287 radeon_connector->ddc_bus->rec.y_clk_reg,
276 radeon_connector->ddc_bus->rec.get_data_reg); 288 radeon_connector->ddc_bus->rec.y_data_reg);
277 DRM_INFO(" Encoders:\n"); 289 DRM_INFO(" Encoders:\n");
278 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 290 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
279 radeon_encoder = to_radeon_encoder(encoder); 291 radeon_encoder = to_radeon_encoder(encoder);
@@ -324,6 +336,7 @@ static bool radeon_setup_enc_conn(struct drm_device *dev)
324 ret = radeon_get_legacy_connector_info_from_table(dev); 336 ret = radeon_get_legacy_connector_info_from_table(dev);
325 } 337 }
326 if (ret) { 338 if (ret) {
339 radeon_setup_encoder_clones(dev);
327 radeon_print_display_setup(dev); 340 radeon_print_display_setup(dev);
328 list_for_each_entry(drm_connector, &dev->mode_config.connector_list, head) 341 list_for_each_entry(drm_connector, &dev->mode_config.connector_list, head)
329 radeon_ddc_dump(drm_connector); 342 radeon_ddc_dump(drm_connector);
@@ -336,12 +349,17 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
336{ 349{
337 int ret = 0; 350 int ret = 0;
338 351
352 if (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
353 struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
354 if (dig->dp_i2c_bus)
355 radeon_connector->edid = drm_get_edid(&radeon_connector->base, &dig->dp_i2c_bus->adapter);
356 }
339 if (!radeon_connector->ddc_bus) 357 if (!radeon_connector->ddc_bus)
340 return -1; 358 return -1;
341 if (!radeon_connector->edid) { 359 if (!radeon_connector->edid) {
342 radeon_i2c_do_lock(radeon_connector, 1); 360 radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
343 radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); 361 radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
344 radeon_i2c_do_lock(radeon_connector, 0); 362 radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
345 } 363 }
346 364
347 if (radeon_connector->edid) { 365 if (radeon_connector->edid) {
@@ -361,9 +379,9 @@ static int radeon_ddc_dump(struct drm_connector *connector)
361 379
362 if (!radeon_connector->ddc_bus) 380 if (!radeon_connector->ddc_bus)
363 return -1; 381 return -1;
364 radeon_i2c_do_lock(radeon_connector, 1); 382 radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
365 edid = drm_get_edid(connector, &radeon_connector->ddc_bus->adapter); 383 edid = drm_get_edid(connector, &radeon_connector->ddc_bus->adapter);
366 radeon_i2c_do_lock(radeon_connector, 0); 384 radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
367 if (edid) { 385 if (edid) {
368 kfree(edid); 386 kfree(edid);
369 } 387 }
@@ -542,6 +560,98 @@ void radeon_compute_pll(struct radeon_pll *pll,
542 *post_div_p = best_post_div; 560 *post_div_p = best_post_div;
543} 561}
544 562
563void radeon_compute_pll_avivo(struct radeon_pll *pll,
564 uint64_t freq,
565 uint32_t *dot_clock_p,
566 uint32_t *fb_div_p,
567 uint32_t *frac_fb_div_p,
568 uint32_t *ref_div_p,
569 uint32_t *post_div_p,
570 int flags)
571{
572 fixed20_12 m, n, frac_n, p, f_vco, f_pclk, best_freq;
573 fixed20_12 pll_out_max, pll_out_min;
574 fixed20_12 pll_in_max, pll_in_min;
575 fixed20_12 reference_freq;
576 fixed20_12 error, ffreq, a, b;
577
578 pll_out_max.full = rfixed_const(pll->pll_out_max);
579 pll_out_min.full = rfixed_const(pll->pll_out_min);
580 pll_in_max.full = rfixed_const(pll->pll_in_max);
581 pll_in_min.full = rfixed_const(pll->pll_in_min);
582 reference_freq.full = rfixed_const(pll->reference_freq);
583 do_div(freq, 10);
584 ffreq.full = rfixed_const(freq);
585 error.full = rfixed_const(100 * 100);
586
587 /* max p */
588 p.full = rfixed_div(pll_out_max, ffreq);
589 p.full = rfixed_floor(p);
590
591 /* min m */
592 m.full = rfixed_div(reference_freq, pll_in_max);
593 m.full = rfixed_ceil(m);
594
595 while (1) {
596 n.full = rfixed_div(ffreq, reference_freq);
597 n.full = rfixed_mul(n, m);
598 n.full = rfixed_mul(n, p);
599
600 f_vco.full = rfixed_div(n, m);
601 f_vco.full = rfixed_mul(f_vco, reference_freq);
602
603 f_pclk.full = rfixed_div(f_vco, p);
604
605 if (f_pclk.full > ffreq.full)
606 error.full = f_pclk.full - ffreq.full;
607 else
608 error.full = ffreq.full - f_pclk.full;
609 error.full = rfixed_div(error, f_pclk);
610 a.full = rfixed_const(100 * 100);
611 error.full = rfixed_mul(error, a);
612
613 a.full = rfixed_mul(m, p);
614 a.full = rfixed_div(n, a);
615 best_freq.full = rfixed_mul(reference_freq, a);
616
617 if (rfixed_trunc(error) < 25)
618 break;
619
620 a.full = rfixed_const(1);
621 m.full = m.full + a.full;
622 a.full = rfixed_div(reference_freq, m);
623 if (a.full >= pll_in_min.full)
624 continue;
625
626 m.full = rfixed_div(reference_freq, pll_in_max);
627 m.full = rfixed_ceil(m);
628 a.full = rfixed_const(1);
629 p.full = p.full - a.full;
630 a.full = rfixed_mul(p, ffreq);
631 if (a.full >= pll_out_min.full)
632 continue;
633 else {
634 DRM_ERROR("Unable to find pll dividers\n");
635 break;
636 }
637 }
638
639 a.full = rfixed_const(10);
640 b.full = rfixed_mul(n, a);
641
642 frac_n.full = rfixed_floor(n);
643 frac_n.full = rfixed_mul(frac_n, a);
644 frac_n.full = b.full - frac_n.full;
645
646 *dot_clock_p = rfixed_trunc(best_freq);
647 *fb_div_p = rfixed_trunc(n);
648 *frac_fb_div_p = rfixed_trunc(frac_n);
649 *ref_div_p = rfixed_trunc(m);
650 *post_div_p = rfixed_trunc(p);
651
652 DRM_DEBUG("%u %d.%d, %d, %d\n", *dot_clock_p * 10, *fb_div_p, *frac_fb_div_p, *ref_div_p, *post_div_p);
653}
654
545static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb) 655static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
546{ 656{
547 struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb); 657 struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
@@ -642,7 +752,7 @@ int radeon_modeset_create_props(struct radeon_device *rdev)
642 return -ENOMEM; 752 return -ENOMEM;
643 753
644 rdev->mode_info.coherent_mode_property->values[0] = 0; 754 rdev->mode_info.coherent_mode_property->values[0] = 0;
645 rdev->mode_info.coherent_mode_property->values[0] = 1; 755 rdev->mode_info.coherent_mode_property->values[1] = 1;
646 } 756 }
647 757
648 if (!ASIC_IS_AVIVO(rdev)) { 758 if (!ASIC_IS_AVIVO(rdev)) {
@@ -666,7 +776,7 @@ int radeon_modeset_create_props(struct radeon_device *rdev)
666 if (!rdev->mode_info.load_detect_property) 776 if (!rdev->mode_info.load_detect_property)
667 return -ENOMEM; 777 return -ENOMEM;
668 rdev->mode_info.load_detect_property->values[0] = 0; 778 rdev->mode_info.load_detect_property->values[0] = 0;
669 rdev->mode_info.load_detect_property->values[0] = 1; 779 rdev->mode_info.load_detect_property->values[1] = 1;
670 780
671 drm_mode_create_scaling_mode_property(rdev->ddev); 781 drm_mode_create_scaling_mode_property(rdev->ddev);
672 782
@@ -723,6 +833,8 @@ int radeon_modeset_init(struct radeon_device *rdev)
723 if (!ret) { 833 if (!ret) {
724 return ret; 834 return ret;
725 } 835 }
836 /* initialize hpd */
837 radeon_hpd_init(rdev);
726 drm_helper_initial_config(rdev->ddev); 838 drm_helper_initial_config(rdev->ddev);
727 return 0; 839 return 0;
728} 840}
@@ -730,6 +842,7 @@ int radeon_modeset_init(struct radeon_device *rdev)
730void radeon_modeset_fini(struct radeon_device *rdev) 842void radeon_modeset_fini(struct radeon_device *rdev)
731{ 843{
732 if (rdev->mode_info.mode_config_initialized) { 844 if (rdev->mode_info.mode_config_initialized) {
845 radeon_hpd_fini(rdev);
733 drm_mode_config_cleanup(rdev->ddev); 846 drm_mode_config_cleanup(rdev->ddev);
734 rdev->mode_info.mode_config_initialized = false; 847 rdev->mode_info.mode_config_initialized = false;
735 } 848 }
@@ -750,9 +863,17 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
750 if (encoder->crtc != crtc) 863 if (encoder->crtc != crtc)
751 continue; 864 continue;
752 if (first) { 865 if (first) {
753 radeon_crtc->rmx_type = radeon_encoder->rmx_type; 866 /* set scaling */
867 if (radeon_encoder->rmx_type == RMX_OFF)
868 radeon_crtc->rmx_type = RMX_OFF;
869 else if (mode->hdisplay < radeon_encoder->native_mode.hdisplay ||
870 mode->vdisplay < radeon_encoder->native_mode.vdisplay)
871 radeon_crtc->rmx_type = radeon_encoder->rmx_type;
872 else
873 radeon_crtc->rmx_type = RMX_OFF;
874 /* copy native mode */
754 memcpy(&radeon_crtc->native_mode, 875 memcpy(&radeon_crtc->native_mode,
755 &radeon_encoder->native_mode, 876 &radeon_encoder->native_mode,
756 sizeof(struct drm_display_mode)); 877 sizeof(struct drm_display_mode));
757 first = false; 878 first = false;
758 } else { 879 } else {
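radeon_compute_pll_avivo() above walks dividers for the usual PLL relation f_vco = f_ref * N / M and f_pclk = f_vco / P, accepting a candidate once the relative error drops below 25 hundredths of a percent (0.25%). A plain-integer check of one candidate, with made-up example values:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            /* example values only: 27 MHz reference, 270 MHz target */
            uint64_t f_ref = 27000, target = 270000;        /* kHz */
            uint64_t n = 100, m = 2, p = 5;

            uint64_t f_vco  = f_ref * n / m;                /* 1350000 kHz */
            uint64_t f_pclk = f_vco / p;                    /* 270000 kHz */

            /* error in hundredths of a percent, relative to f_pclk,
             * matching the driver's loop */
            uint64_t delta = f_pclk > target ? f_pclk - target
                                             : target - f_pclk;
            uint64_t err = delta * 100 * 100 / f_pclk;

            printf("f_vco=%llu f_pclk=%llu err=%llu\n",
                   (unsigned long long)f_vco,
                   (unsigned long long)f_pclk,
                   (unsigned long long)err);
            return 0;
    }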
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 7f50fb864af8..28077247f4f3 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -86,6 +86,7 @@ int radeon_benchmarking = 0;
86int radeon_testing = 0; 86int radeon_testing = 0;
87int radeon_connector_table = 0; 87int radeon_connector_table = 0;
88int radeon_tv = 1; 88int radeon_tv = 1;
89int radeon_new_pll = 1;
89 90
90MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); 91MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
91module_param_named(no_wb, radeon_no_wb, int, 0444); 92module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -120,6 +121,9 @@ module_param_named(connector_table, radeon_connector_table, int, 0444);
120MODULE_PARM_DESC(tv, "TV enable (0 = disable)"); 121MODULE_PARM_DESC(tv, "TV enable (0 = disable)");
121module_param_named(tv, radeon_tv, int, 0444); 122module_param_named(tv, radeon_tv, int, 0444);
122 123
124MODULE_PARM_DESC(new_pll, "Select new PLL code for AVIVO chips");
125module_param_named(new_pll, radeon_new_pll, int, 0444);
126
123static int radeon_suspend(struct drm_device *dev, pm_message_t state) 127static int radeon_suspend(struct drm_device *dev, pm_message_t state)
124{ 128{
125 drm_radeon_private_t *dev_priv = dev->dev_private; 129 drm_radeon_private_t *dev_priv = dev->dev_private;
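Because the parameter is registered with module_param_named(new_pll, radeon_new_pll, int, 0444), the new AVIVO PLL code can be toggled at load time: radeon.new_pll=0 on the kernel command line, or modprobe radeon new_pll=0, selects the old PLL path, and the current value is readable from /sys/module/radeon/parameters/new_pll.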
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index 350962e0f346..e13785282a82 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -1104,7 +1104,6 @@ extern u32 radeon_get_scratch(drm_radeon_private_t *dev_priv, int index);
1104# define R600_IT_WAIT_REG_MEM 0x00003C00 1104# define R600_IT_WAIT_REG_MEM 0x00003C00
1105# define R600_IT_MEM_WRITE 0x00003D00 1105# define R600_IT_MEM_WRITE 0x00003D00
1106# define R600_IT_INDIRECT_BUFFER 0x00003200 1106# define R600_IT_INDIRECT_BUFFER 0x00003200
1107# define R600_IT_CP_INTERRUPT 0x00004000
1108# define R600_IT_SURFACE_SYNC 0x00004300 1107# define R600_IT_SURFACE_SYNC 0x00004300
1109# define R600_CB0_DEST_BASE_ENA (1 << 6) 1108# define R600_CB0_DEST_BASE_ENA (1 << 6)
1110# define R600_TC_ACTION_ENA (1 << 23) 1109# define R600_TC_ACTION_ENA (1 << 23)
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index d42bc512d75a..b4f23ec93201 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -35,6 +35,51 @@ extern int atom_debug;
35bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index, 35bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
36 struct drm_display_mode *mode); 36 struct drm_display_mode *mode);
37 37
38static uint32_t radeon_encoder_clones(struct drm_encoder *encoder)
39{
40 struct drm_device *dev = encoder->dev;
41 struct radeon_device *rdev = dev->dev_private;
42 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
43 struct drm_encoder *clone_encoder;
44 uint32_t index_mask = 0;
45 int count;
46
47 /* DIG routing gets problematic */
48 if (rdev->family >= CHIP_R600)
49 return index_mask;
50 /* LVDS/TV are too wacky */
51 if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
52 return index_mask;
53 /* DVO requires 2x ppll clocks depending on tmds chip */
54 if (radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT)
55 return index_mask;
56
57 count = -1;
58 list_for_each_entry(clone_encoder, &dev->mode_config.encoder_list, head) {
59 struct radeon_encoder *radeon_clone = to_radeon_encoder(clone_encoder);
60 count++;
61
62 if (clone_encoder == encoder)
63 continue;
64 if (radeon_clone->devices & (ATOM_DEVICE_LCD_SUPPORT))
65 continue;
66 if (radeon_clone->devices & ATOM_DEVICE_DFP2_SUPPORT)
67 continue;
68 else
69 index_mask |= (1 << count);
70 }
71 return index_mask;
72}
73
74void radeon_setup_encoder_clones(struct drm_device *dev)
75{
76 struct drm_encoder *encoder;
77
78 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
79 encoder->possible_clones = radeon_encoder_clones(encoder);
80 }
81}
82
38uint32_t 83uint32_t
39radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device, uint8_t dac) 84radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device, uint8_t dac)
40{ 85{
@@ -163,29 +208,6 @@ radeon_get_connector_for_encoder(struct drm_encoder *encoder)
163 return NULL; 208 return NULL;
164} 209}
165 210
166/* used for both atom and legacy */
167void radeon_rmx_mode_fixup(struct drm_encoder *encoder,
168 struct drm_display_mode *mode,
169 struct drm_display_mode *adjusted_mode)
170{
171 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
172 struct drm_device *dev = encoder->dev;
173 struct radeon_device *rdev = dev->dev_private;
174 struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
175
176 if (mode->hdisplay < native_mode->hdisplay ||
177 mode->vdisplay < native_mode->vdisplay) {
178 int mode_id = adjusted_mode->base.id;
179 *adjusted_mode = *native_mode;
180 if (!ASIC_IS_AVIVO(rdev)) {
181 adjusted_mode->hdisplay = mode->hdisplay;
182 adjusted_mode->vdisplay = mode->vdisplay;
183 }
184 adjusted_mode->base.id = mode_id;
185 }
186}
187
188
189static bool radeon_atom_mode_fixup(struct drm_encoder *encoder, 211static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
190 struct drm_display_mode *mode, 212 struct drm_display_mode *mode,
191 struct drm_display_mode *adjusted_mode) 213 struct drm_display_mode *adjusted_mode)
@@ -198,14 +220,24 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
198 radeon_encoder_set_active_device(encoder); 220 radeon_encoder_set_active_device(encoder);
199 drm_mode_set_crtcinfo(adjusted_mode, 0); 221 drm_mode_set_crtcinfo(adjusted_mode, 0);
200 222
201 if (radeon_encoder->rmx_type != RMX_OFF)
202 radeon_rmx_mode_fixup(encoder, mode, adjusted_mode);
203
204 /* hw bug */ 223 /* hw bug */
205 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) 224 if ((mode->flags & DRM_MODE_FLAG_INTERLACE)
206 && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2))) 225 && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
207 adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2; 226 adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
208 227
228 /* get the native mode for LVDS */
229 if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) {
230 struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
231 int mode_id = adjusted_mode->base.id;
232 *adjusted_mode = *native_mode;
233 if (!ASIC_IS_AVIVO(rdev)) {
234 adjusted_mode->hdisplay = mode->hdisplay;
235 adjusted_mode->vdisplay = mode->vdisplay;
236 }
237 adjusted_mode->base.id = mode_id;
238 }
239
240 /* get the native mode for TV */
209 if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) { 241 if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) {
210 struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv; 242 struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv;
211 if (tv_dac) { 243 if (tv_dac) {
@@ -218,6 +250,12 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
218 } 250 }
219 } 251 }
220 252
253 if (ASIC_IS_DCE3(rdev) &&
254 (radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT))) {
255 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
256 radeon_dp_set_link_config(connector, mode);
257 }
258
221 return true; 259 return true;
222} 260}
223 261
@@ -392,7 +430,7 @@ union lvds_encoder_control {
392 LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2 v2; 430 LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2 v2;
393}; 431};
394 432
395static void 433void
396atombios_digital_setup(struct drm_encoder *encoder, int action) 434atombios_digital_setup(struct drm_encoder *encoder, int action)
397{ 435{
398 struct drm_device *dev = encoder->dev; 436 struct drm_device *dev = encoder->dev;
@@ -522,6 +560,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
522{ 560{
523 struct drm_connector *connector; 561 struct drm_connector *connector;
524 struct radeon_connector *radeon_connector; 562 struct radeon_connector *radeon_connector;
563 struct radeon_connector_atom_dig *radeon_dig_connector;
525 564
526 connector = radeon_get_connector_for_encoder(encoder); 565 connector = radeon_get_connector_for_encoder(encoder);
527 if (!connector) 566 if (!connector)
@@ -551,10 +590,10 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
551 return ATOM_ENCODER_MODE_LVDS; 590 return ATOM_ENCODER_MODE_LVDS;
552 break; 591 break;
553 case DRM_MODE_CONNECTOR_DisplayPort: 592 case DRM_MODE_CONNECTOR_DisplayPort:
554 /*if (radeon_output->MonType == MT_DP) 593 radeon_dig_connector = radeon_connector->con_priv;
555 return ATOM_ENCODER_MODE_DP; 594 if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT)
556 else*/ 595 return ATOM_ENCODER_MODE_DP;
557 if (drm_detect_hdmi_monitor(radeon_connector->edid)) 596 else if (drm_detect_hdmi_monitor(radeon_connector->edid))
558 return ATOM_ENCODER_MODE_HDMI; 597 return ATOM_ENCODER_MODE_HDMI;
559 else 598 else
560 return ATOM_ENCODER_MODE_DVI; 599 return ATOM_ENCODER_MODE_DVI;
@@ -573,6 +612,30 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
573 } 612 }
574} 613}
575 614
615/*
616 * DIG Encoder/Transmitter Setup
617 *
618 * DCE 3.0/3.1
619 * - 2 DIG transmitter blocks. UNIPHY (links A and B) and LVTMA.
620 * Supports up to 3 digital outputs
621 * - 2 DIG encoder blocks.
622 * DIG1 can drive UNIPHY link A or link B
623 * DIG2 can drive UNIPHY link B or LVTMA
624 *
625 * DCE 3.2
626 * - 3 DIG transmitter blocks. UNIPHY0/1/2 (links A and B).
627 * Supports up to 5 digital outputs
628 * - 2 DIG encoder blocks.
629 * DIG1/2 can drive UNIPHY0/1/2 link A or link B
630 *
631 * Routing
632 * crtc -> dig encoder -> UNIPHY/LVTMA (1 or 2 links)
633 * Examples:
634 * crtc0 -> dig2 -> LVTMA links A+B -> TMDS/HDMI
635 * crtc1 -> dig1 -> UNIPHY0 link B -> DP
636 * crtc0 -> dig1 -> UNIPHY2 link A -> LVDS
637 * crtc1 -> dig2 -> UNIPHY1 link B+A -> TMDS/HDMI
638 */
576static void 639static void
577atombios_dig_encoder_setup(struct drm_encoder *encoder, int action) 640atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
578{ 641{
@@ -614,10 +677,17 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
614 } else { 677 } else {
615 switch (radeon_encoder->encoder_id) { 678 switch (radeon_encoder->encoder_id) {
616 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 679 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
617 index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl); 680 /* XXX doesn't really matter which dig encoder we pick as long as it's
681 * not already in use
682 */
683 if (dig_connector->linkb)
684 index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
685 else
686 index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
618 num = 1; 687 num = 1;
619 break; 688 break;
620 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: 689 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
690 /* Only dig2 encoder can drive LVTMA */
621 index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl); 691 index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
622 num = 2; 692 num = 2;
623 break; 693 break;
@@ -652,18 +722,21 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
652 } 722 }
653 } 723 }
654 724
655 if (radeon_encoder->pixel_clock > 165000) { 725 args.ucEncoderMode = atombios_get_encoder_mode(encoder);
656 args.ucConfig |= ATOM_ENCODER_CONFIG_LINKA_B; 726
727 if (args.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
728 if (dig_connector->dp_clock == 270000)
729 args.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
730 args.ucLaneNum = dig_connector->dp_lane_count;
731 } else if (radeon_encoder->pixel_clock > 165000)
657 args.ucLaneNum = 8; 732 args.ucLaneNum = 8;
658 } else { 733 else
659 if (dig_connector->linkb)
660 args.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
661 else
662 args.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
663 args.ucLaneNum = 4; 734 args.ucLaneNum = 4;
664 }
665 735
666 args.ucEncoderMode = atombios_get_encoder_mode(encoder); 736 if (dig_connector->linkb)
737 args.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
738 else
739 args.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
667 740
668 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 741 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
669 742
@@ -674,8 +747,8 @@ union dig_transmitter_control {
674 DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2; 747 DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2;
675}; 748};
676 749
677static void 750void
678atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action) 751atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t lane_num, uint8_t lane_set)
679{ 752{
680 struct drm_device *dev = encoder->dev; 753 struct drm_device *dev = encoder->dev;
681 struct radeon_device *rdev = dev->dev_private; 754 struct radeon_device *rdev = dev->dev_private;
@@ -687,6 +760,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
687 struct drm_connector *connector; 760 struct drm_connector *connector;
688 struct radeon_connector *radeon_connector; 761 struct radeon_connector *radeon_connector;
689 struct radeon_connector_atom_dig *dig_connector; 762 struct radeon_connector_atom_dig *dig_connector;
763 bool is_dp = false;
690 764
691 connector = radeon_get_connector_for_encoder(encoder); 765 connector = radeon_get_connector_for_encoder(encoder);
692 if (!connector) 766 if (!connector)
@@ -704,6 +778,9 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
704 778
705 dig_connector = radeon_connector->con_priv; 779 dig_connector = radeon_connector->con_priv;
706 780
781 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP)
782 is_dp = true;
783
707 memset(&args, 0, sizeof(args)); 784 memset(&args, 0, sizeof(args));
708 785
709 if (ASIC_IS_DCE32(rdev)) 786 if (ASIC_IS_DCE32(rdev))
@@ -724,17 +801,23 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
724 args.v1.ucAction = action; 801 args.v1.ucAction = action;
725 if (action == ATOM_TRANSMITTER_ACTION_INIT) { 802 if (action == ATOM_TRANSMITTER_ACTION_INIT) {
726 args.v1.usInitInfo = radeon_connector->connector_object_id; 803 args.v1.usInitInfo = radeon_connector->connector_object_id;
804 } else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
805 args.v1.asMode.ucLaneSel = lane_num;
806 args.v1.asMode.ucLaneSet = lane_set;
727 } else { 807 } else {
728 if (radeon_encoder->pixel_clock > 165000) 808 if (is_dp)
809 args.v1.usPixelClock =
810 cpu_to_le16(dig_connector->dp_clock / 10);
811 else if (radeon_encoder->pixel_clock > 165000)
729 args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10); 812 args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
730 else 813 else
731 args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); 814 args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
732 } 815 }
733 if (ASIC_IS_DCE32(rdev)) { 816 if (ASIC_IS_DCE32(rdev)) {
734 if (radeon_encoder->pixel_clock > 165000)
735 args.v2.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
736 if (dig->dig_block) 817 if (dig->dig_block)
737 args.v2.acConfig.ucEncoderSel = 1; 818 args.v2.acConfig.ucEncoderSel = 1;
819 if (dig_connector->linkb)
820 args.v2.acConfig.ucLinkSel = 1;
738 821
739 switch (radeon_encoder->encoder_id) { 822 switch (radeon_encoder->encoder_id) {
740 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 823 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
@@ -751,7 +834,9 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
751 break; 834 break;
752 } 835 }
753 836
754 if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { 837 if (is_dp)
838 args.v2.acConfig.fCoherentMode = 1;
839 else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
755 if (dig->coherent_mode) 840 if (dig->coherent_mode)
756 args.v2.acConfig.fCoherentMode = 1; 841 args.v2.acConfig.fCoherentMode = 1;
757 } 842 }
@@ -760,17 +845,20 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
760 845
761 switch (radeon_encoder->encoder_id) { 846 switch (radeon_encoder->encoder_id) {
762 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 847 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
763 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER; 848 /* XXX doesn't really matter which dig encoder we pick as long as it's
849 * not already in use
850 */
851 if (dig_connector->linkb)
852 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
853 else
854 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
764 if (rdev->flags & RADEON_IS_IGP) { 855 if (rdev->flags & RADEON_IS_IGP) {
765 if (radeon_encoder->pixel_clock > 165000) { 856 if (radeon_encoder->pixel_clock > 165000) {
766 args.v1.ucConfig |= (ATOM_TRANSMITTER_CONFIG_8LANE_LINK |
767 ATOM_TRANSMITTER_CONFIG_LINKA_B);
768 if (dig_connector->igp_lane_info & 0x3) 857 if (dig_connector->igp_lane_info & 0x3)
769 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7; 858 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7;
770 else if (dig_connector->igp_lane_info & 0xc) 859 else if (dig_connector->igp_lane_info & 0xc)
771 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15; 860 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15;
772 } else { 861 } else {
773 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA;
774 if (dig_connector->igp_lane_info & 0x1) 862 if (dig_connector->igp_lane_info & 0x1)
775 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3; 863 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3;
776 else if (dig_connector->igp_lane_info & 0x2) 864 else if (dig_connector->igp_lane_info & 0x2)
@@ -780,35 +868,25 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
780 else if (dig_connector->igp_lane_info & 0x8) 868 else if (dig_connector->igp_lane_info & 0x8)
781 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15; 869 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15;
782 } 870 }
783 } else {
784 if (radeon_encoder->pixel_clock > 165000)
785 args.v1.ucConfig |= (ATOM_TRANSMITTER_CONFIG_8LANE_LINK |
786 ATOM_TRANSMITTER_CONFIG_LINKA_B |
787 ATOM_TRANSMITTER_CONFIG_LANE_0_7);
788 else {
789 if (dig_connector->linkb)
790 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB | ATOM_TRANSMITTER_CONFIG_LANE_0_3;
791 else
792 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA | ATOM_TRANSMITTER_CONFIG_LANE_0_3;
793 }
794 } 871 }
795 break; 872 break;
796 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: 873 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
874 /* Only dig2 encoder can drive LVTMA */
797 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER; 875 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
798 if (radeon_encoder->pixel_clock > 165000)
799 args.v1.ucConfig |= (ATOM_TRANSMITTER_CONFIG_8LANE_LINK |
800 ATOM_TRANSMITTER_CONFIG_LINKA_B |
801 ATOM_TRANSMITTER_CONFIG_LANE_0_7);
802 else {
803 if (dig_connector->linkb)
804 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB | ATOM_TRANSMITTER_CONFIG_LANE_0_3;
805 else
806 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA | ATOM_TRANSMITTER_CONFIG_LANE_0_3;
807 }
808 break; 876 break;
809 } 877 }
810 878
811 if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { 879 if (radeon_encoder->pixel_clock > 165000)
880 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_8LANE_LINK;
881
882 if (dig_connector->linkb)
883 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB;
884 else
885 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA;
886
887 if (is_dp)
888 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
889 else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
812 if (dig->coherent_mode) 890 if (dig->coherent_mode)
813 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT; 891 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
814 } 892 }
@@ -918,12 +996,16 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
918 if (is_dig) { 996 if (is_dig) {
919 switch (mode) { 997 switch (mode) {
920 case DRM_MODE_DPMS_ON: 998 case DRM_MODE_DPMS_ON:
921 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE); 999 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
1000 {
1001 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
1002 dp_link_train(encoder, connector);
1003 }
922 break; 1004 break;
923 case DRM_MODE_DPMS_STANDBY: 1005 case DRM_MODE_DPMS_STANDBY:
924 case DRM_MODE_DPMS_SUSPEND: 1006 case DRM_MODE_DPMS_SUSPEND:
925 case DRM_MODE_DPMS_OFF: 1007 case DRM_MODE_DPMS_OFF:
926 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE); 1008 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
927 break; 1009 break;
928 } 1010 }
929 } else { 1011 } else {
@@ -1025,13 +1107,33 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
1025 args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID; 1107 args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
1026 else 1108 else
1027 args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID; 1109 args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
1028 } else 1110 } else {
1029 args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID; 1111 struct drm_connector *connector;
1112 struct radeon_connector *radeon_connector;
1113 struct radeon_connector_atom_dig *dig_connector;
1114
1115 connector = radeon_get_connector_for_encoder(encoder);
1116 if (!connector)
1117 return;
1118 radeon_connector = to_radeon_connector(connector);
1119 if (!radeon_connector->con_priv)
1120 return;
1121 dig_connector = radeon_connector->con_priv;
1122
1123 /* XXX doesn't really matter which dig encoder we pick as long as it's
1124 * not already in use
1125 */
1126 if (dig_connector->linkb)
1127 args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
1128 else
1129 args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
1130 }
1030 break; 1131 break;
1031 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: 1132 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
1032 args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID; 1133 args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID;
1033 break; 1134 break;
1034 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: 1135 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
1136 /* Only dig2 encoder can drive LVTMA */
1035 args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID; 1137 args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
1036 break; 1138 break;
1037 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: 1139 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
@@ -1104,11 +1206,14 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
1104 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 1206 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1105 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); 1207 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
1106 1208
1107 if (radeon_encoder->enc_priv) { 1209 if (radeon_encoder->active_device &
1108 struct radeon_encoder_atom_dig *dig; 1210 (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) {
1211 if (radeon_encoder->enc_priv) {
1212 struct radeon_encoder_atom_dig *dig;
1109 1213
1110 dig = radeon_encoder->enc_priv; 1214 dig = radeon_encoder->enc_priv;
1111 dig->dig_block = radeon_crtc->crtc_id; 1215 dig->dig_block = radeon_crtc->crtc_id;
1216 }
1112 } 1217 }
1113 radeon_encoder->pixel_clock = adjusted_mode->clock; 1218 radeon_encoder->pixel_clock = adjusted_mode->clock;
1114 1219
@@ -1134,14 +1239,14 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
1134 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: 1239 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
1135 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: 1240 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
1136 /* disable the encoder and transmitter */ 1241 /* disable the encoder and transmitter */
1137 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE); 1242 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
1138 atombios_dig_encoder_setup(encoder, ATOM_DISABLE); 1243 atombios_dig_encoder_setup(encoder, ATOM_DISABLE);
1139 1244
1140 /* setup and enable the encoder and transmitter */ 1245 /* setup and enable the encoder and transmitter */
1141 atombios_dig_encoder_setup(encoder, ATOM_ENABLE); 1246 atombios_dig_encoder_setup(encoder, ATOM_ENABLE);
1142 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT); 1247 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0);
1143 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP); 1248 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
1144 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE); 1249 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
1145 break; 1250 break;
1146 case ENCODER_OBJECT_ID_INTERNAL_DDI: 1251 case ENCODER_OBJECT_ID_INTERNAL_DDI:
1147 atombios_ddia_setup(encoder, ATOM_ENABLE); 1252 atombios_ddia_setup(encoder, ATOM_ENABLE);
@@ -1354,7 +1459,6 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su
1354 encoder->possible_crtcs = 0x1; 1459 encoder->possible_crtcs = 0x1;
1355 else 1460 else
1356 encoder->possible_crtcs = 0x3; 1461 encoder->possible_crtcs = 0x3;
1357 encoder->possible_clones = 0;
1358 1462
1359 radeon_encoder->enc_priv = NULL; 1463 radeon_encoder->enc_priv = NULL;
1360 1464
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index d10eb43645c8..3ba213d1b06c 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -140,7 +140,7 @@ int radeonfb_create(struct drm_device *dev,
140 struct radeon_framebuffer *rfb; 140 struct radeon_framebuffer *rfb;
141 struct drm_mode_fb_cmd mode_cmd; 141 struct drm_mode_fb_cmd mode_cmd;
142 struct drm_gem_object *gobj = NULL; 142 struct drm_gem_object *gobj = NULL;
143 struct radeon_object *robj = NULL; 143 struct radeon_bo *rbo = NULL;
144 struct device *device = &rdev->pdev->dev; 144 struct device *device = &rdev->pdev->dev;
145 int size, aligned_size, ret; 145 int size, aligned_size, ret;
146 u64 fb_gpuaddr; 146 u64 fb_gpuaddr;
@@ -168,14 +168,14 @@ int radeonfb_create(struct drm_device *dev,
168 ret = radeon_gem_object_create(rdev, aligned_size, 0, 168 ret = radeon_gem_object_create(rdev, aligned_size, 0,
169 RADEON_GEM_DOMAIN_VRAM, 169 RADEON_GEM_DOMAIN_VRAM,
170 false, ttm_bo_type_kernel, 170 false, ttm_bo_type_kernel,
171 false, &gobj); 171 &gobj);
172 if (ret) { 172 if (ret) {
173 printk(KERN_ERR "failed to allocate framebuffer (%d %d)\n", 173 printk(KERN_ERR "failed to allocate framebuffer (%d %d)\n",
174 surface_width, surface_height); 174 surface_width, surface_height);
175 ret = -ENOMEM; 175 ret = -ENOMEM;
176 goto out; 176 goto out;
177 } 177 }
178 robj = gobj->driver_private; 178 rbo = gobj->driver_private;
179 179
180 if (fb_tiled) 180 if (fb_tiled)
181 tiling_flags = RADEON_TILING_MACRO; 181 tiling_flags = RADEON_TILING_MACRO;
@@ -192,8 +192,13 @@ int radeonfb_create(struct drm_device *dev,
192 } 192 }
193#endif 193#endif
194 194
195 if (tiling_flags) 195 if (tiling_flags) {
196 radeon_object_set_tiling_flags(robj, tiling_flags | RADEON_TILING_SURFACE, mode_cmd.pitch); 196 ret = radeon_bo_set_tiling_flags(rbo,
197 tiling_flags | RADEON_TILING_SURFACE,
198 mode_cmd.pitch);
199 if (ret)
200 dev_err(rdev->dev, "FB failed to set tiling flags\n");
201 }
197 mutex_lock(&rdev->ddev->struct_mutex); 202 mutex_lock(&rdev->ddev->struct_mutex);
198 fb = radeon_framebuffer_create(rdev->ddev, &mode_cmd, gobj); 203 fb = radeon_framebuffer_create(rdev->ddev, &mode_cmd, gobj);
199 if (fb == NULL) { 204 if (fb == NULL) {
@@ -201,10 +206,19 @@ int radeonfb_create(struct drm_device *dev,
201 ret = -ENOMEM; 206 ret = -ENOMEM;
202 goto out_unref; 207 goto out_unref;
203 } 208 }
204 ret = radeon_object_pin(robj, RADEON_GEM_DOMAIN_VRAM, &fb_gpuaddr); 209 ret = radeon_bo_reserve(rbo, false);
210 if (unlikely(ret != 0))
211 goto out_unref;
212 ret = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_gpuaddr);
213 if (ret) {
214 radeon_bo_unreserve(rbo);
215 goto out_unref;
216 }
217 if (fb_tiled)
218 radeon_bo_check_tiling(rbo, 0, 0);
219 ret = radeon_bo_kmap(rbo, &fbptr);
220 radeon_bo_unreserve(rbo);
205 if (ret) { 221 if (ret) {
206 printk(KERN_ERR "failed to pin framebuffer\n");
207 ret = -ENOMEM;
208 goto out_unref; 222 goto out_unref;
209 } 223 }
210 224
@@ -213,7 +227,7 @@ int radeonfb_create(struct drm_device *dev,
213 *fb_p = fb; 227 *fb_p = fb;
214 rfb = to_radeon_framebuffer(fb); 228 rfb = to_radeon_framebuffer(fb);
215 rdev->fbdev_rfb = rfb; 229 rdev->fbdev_rfb = rfb;
216 rdev->fbdev_robj = robj; 230 rdev->fbdev_rbo = rbo;
217 231
218 info = framebuffer_alloc(sizeof(struct radeon_fb_device), device); 232 info = framebuffer_alloc(sizeof(struct radeon_fb_device), device);
219 if (info == NULL) { 233 if (info == NULL) {
@@ -234,15 +248,7 @@ int radeonfb_create(struct drm_device *dev,
234 if (ret) 248 if (ret)
235 goto out_unref; 249 goto out_unref;
236 250
237 if (fb_tiled) 251 memset_io(fbptr, 0xff, aligned_size);
238 radeon_object_check_tiling(robj, 0, 0);
239
240 ret = radeon_object_kmap(robj, &fbptr);
241 if (ret) {
242 goto out_unref;
243 }
244
245 memset_io(fbptr, 0, aligned_size);
246 252
247 strcpy(info->fix.id, "radeondrmfb"); 253 strcpy(info->fix.id, "radeondrmfb");
248 254
@@ -288,8 +294,12 @@ int radeonfb_create(struct drm_device *dev,
288 return 0; 294 return 0;
289 295
290out_unref: 296out_unref:
291 if (robj) { 297 if (rbo) {
292 radeon_object_kunmap(robj); 298 ret = radeon_bo_reserve(rbo, false);
299 if (likely(ret == 0)) {
300 radeon_bo_kunmap(rbo);
301 radeon_bo_unreserve(rbo);
302 }
293 } 303 }
294 if (fb && ret) { 304 if (fb && ret) {
295 list_del(&fb->filp_head); 305 list_del(&fb->filp_head);
@@ -321,14 +331,22 @@ int radeon_parse_options(char *options)
321 331
322int radeonfb_probe(struct drm_device *dev) 332int radeonfb_probe(struct drm_device *dev)
323{ 333{
324 return drm_fb_helper_single_fb_probe(dev, 32, &radeonfb_create); 334 struct radeon_device *rdev = dev->dev_private;
335 int bpp_sel = 32;
336
 337 /* select 8 bpp console on RN50 or cards with 32MB or less */
338 if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024))
339 bpp_sel = 8;
340
341 return drm_fb_helper_single_fb_probe(dev, bpp_sel, &radeonfb_create);
325} 342}
326 343
327int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb) 344int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
328{ 345{
329 struct fb_info *info; 346 struct fb_info *info;
330 struct radeon_framebuffer *rfb = to_radeon_framebuffer(fb); 347 struct radeon_framebuffer *rfb = to_radeon_framebuffer(fb);
331 struct radeon_object *robj; 348 struct radeon_bo *rbo;
349 int r;
332 350
333 if (!fb) { 351 if (!fb) {
334 return -EINVAL; 352 return -EINVAL;
@@ -336,10 +354,14 @@ int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
336 info = fb->fbdev; 354 info = fb->fbdev;
337 if (info) { 355 if (info) {
338 struct radeon_fb_device *rfbdev = info->par; 356 struct radeon_fb_device *rfbdev = info->par;
339 robj = rfb->obj->driver_private; 357 rbo = rfb->obj->driver_private;
340 unregister_framebuffer(info); 358 unregister_framebuffer(info);
341 radeon_object_kunmap(robj); 359 r = radeon_bo_reserve(rbo, false);
342 radeon_object_unpin(robj); 360 if (likely(r == 0)) {
361 radeon_bo_kunmap(rbo);
362 radeon_bo_unpin(rbo);
363 radeon_bo_unreserve(rbo);
364 }
343 drm_fb_helper_free(&rfbdev->helper); 365 drm_fb_helper_free(&rfbdev->helper);
344 framebuffer_release(info); 366 framebuffer_release(info);
345 } 367 }
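The radeon_fb.c hunks above all follow the same caller-side discipline the radeon_bo rework introduces: reserve the buffer object, do the placement work (pin/kmap/tiling), then unreserve. A minimal sketch of that pattern, assuming only the signatures used in the hunks above; the helper name itself is hypothetical:

static int radeon_bo_pin_and_map(struct radeon_bo *rbo, u32 domain,
                                 u64 *gpu_addr, void **cpu_ptr)
{
        int r;

        /* take the reservation before touching placement */
        r = radeon_bo_reserve(rbo, false);
        if (unlikely(r != 0))
                return r;
        r = radeon_bo_pin(rbo, domain, gpu_addr);
        if (r == 0) {
                r = radeon_bo_kmap(rbo, cpu_ptr);
                /* undo the pin if the mapping failed */
                if (r)
                        radeon_bo_unpin(rbo);
        }
        /* always drop the reservation, success or not */
        radeon_bo_unreserve(rbo);
        return r;
}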
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 3beb26d74719..cb4cd97ae39f 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -168,37 +168,6 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
168 return signaled; 168 return signaled;
169} 169}
170 170
171int r600_fence_wait(struct radeon_fence *fence, bool intr, bool lazy)
172{
173 struct radeon_device *rdev;
174 int ret = 0;
175
176 rdev = fence->rdev;
177
178 __set_current_state(intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
179
180 while (1) {
181 if (radeon_fence_signaled(fence))
182 break;
183
184 if (time_after_eq(jiffies, fence->timeout)) {
185 ret = -EBUSY;
186 break;
187 }
188
189 if (lazy)
190 schedule_timeout(1);
191
192 if (intr && signal_pending(current)) {
193 ret = -ERESTARTSYS;
194 break;
195 }
196 }
197 __set_current_state(TASK_RUNNING);
198 return ret;
199}
200
201
202int radeon_fence_wait(struct radeon_fence *fence, bool intr) 171int radeon_fence_wait(struct radeon_fence *fence, bool intr)
203{ 172{
204 struct radeon_device *rdev; 173 struct radeon_device *rdev;
@@ -216,13 +185,6 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
216 return 0; 185 return 0;
217 } 186 }
218 187
219 if (rdev->family >= CHIP_R600) {
220 r = r600_fence_wait(fence, intr, 0);
221 if (r == -ERESTARTSYS)
222 return -EBUSY;
223 return r;
224 }
225
226retry: 188retry:
227 cur_jiffies = jiffies; 189 cur_jiffies = jiffies;
228 timeout = HZ / 100; 190 timeout = HZ / 100;
@@ -231,14 +193,17 @@ retry:
231 } 193 }
232 194
233 if (intr) { 195 if (intr) {
196 radeon_irq_kms_sw_irq_get(rdev);
234 r = wait_event_interruptible_timeout(rdev->fence_drv.queue, 197 r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
235 radeon_fence_signaled(fence), timeout); 198 radeon_fence_signaled(fence), timeout);
236 if (unlikely(r == -ERESTARTSYS)) { 199 radeon_irq_kms_sw_irq_put(rdev);
237 return -EBUSY; 200 if (unlikely(r < 0))
238 } 201 return r;
239 } else { 202 } else {
203 radeon_irq_kms_sw_irq_get(rdev);
240 r = wait_event_timeout(rdev->fence_drv.queue, 204 r = wait_event_timeout(rdev->fence_drv.queue,
241 radeon_fence_signaled(fence), timeout); 205 radeon_fence_signaled(fence), timeout);
206 radeon_irq_kms_sw_irq_put(rdev);
242 } 207 }
243 if (unlikely(!radeon_fence_signaled(fence))) { 208 if (unlikely(!radeon_fence_signaled(fence))) {
244 if (unlikely(r == 0)) { 209 if (unlikely(r == 0)) {
diff --git a/drivers/gpu/drm/radeon/radeon_fixed.h b/drivers/gpu/drm/radeon/radeon_fixed.h
index 90187d173847..3d4d84e078ac 100644
--- a/drivers/gpu/drm/radeon/radeon_fixed.h
+++ b/drivers/gpu/drm/radeon/radeon_fixed.h
@@ -38,6 +38,23 @@ typedef union rfixed {
38#define fixed_init_half(A) { .full = rfixed_const_half((A)) } 38#define fixed_init_half(A) { .full = rfixed_const_half((A)) }
39#define rfixed_trunc(A) ((A).full >> 12) 39#define rfixed_trunc(A) ((A).full >> 12)
40 40
41static inline u32 rfixed_floor(fixed20_12 A)
42{
43 u32 non_frac = rfixed_trunc(A);
44
45 return rfixed_const(non_frac);
46}
47
48static inline u32 rfixed_ceil(fixed20_12 A)
49{
50 u32 non_frac = rfixed_trunc(A);
51
52 if (A.full > rfixed_const(non_frac))
53 return rfixed_const(non_frac + 1);
54 else
55 return rfixed_const(non_frac);
56}
57
41static inline u32 rfixed_div(fixed20_12 A, fixed20_12 B) 58static inline u32 rfixed_div(fixed20_12 A, fixed20_12 B)
42{ 59{
43 u64 tmp = ((u64)A.full << 13); 60 u64 tmp = ((u64)A.full << 13);
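Note that the new rfixed_floor()/rfixed_ceil() helpers return 20.12 fixed-point values, unlike rfixed_trunc(), which returns a plain integer. A small worked example, assuming rfixed_const(x) expands to (x << 12):

        fixed20_12 a;

        a.full = 0x2800;        /* 2.5 in 20.12, i.e. rfixed_const(2) | 0x800 */
        /* rfixed_trunc(a) == 2        (plain integer)   */
        /* rfixed_floor(a) == 0x2000   (2.0 in 20.12)    */
        /* rfixed_ceil(a)  == 0x3000   (3.0 in 20.12)    */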
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index a68d7566178c..e73d56e83fa6 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -78,11 +78,9 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
78 int r; 78 int r;
79 79
80 if (rdev->gart.table.vram.robj == NULL) { 80 if (rdev->gart.table.vram.robj == NULL) {
81 r = radeon_object_create(rdev, NULL, 81 r = radeon_bo_create(rdev, NULL, rdev->gart.table_size,
82 rdev->gart.table_size, 82 true, RADEON_GEM_DOMAIN_VRAM,
83 true, 83 &rdev->gart.table.vram.robj);
84 RADEON_GEM_DOMAIN_VRAM,
85 false, &rdev->gart.table.vram.robj);
86 if (r) { 84 if (r) {
87 return r; 85 return r;
88 } 86 }
@@ -95,32 +93,38 @@ int radeon_gart_table_vram_pin(struct radeon_device *rdev)
95 uint64_t gpu_addr; 93 uint64_t gpu_addr;
96 int r; 94 int r;
97 95
98 r = radeon_object_pin(rdev->gart.table.vram.robj, 96 r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
99 RADEON_GEM_DOMAIN_VRAM, &gpu_addr); 97 if (unlikely(r != 0))
100 if (r) {
101 radeon_object_unref(&rdev->gart.table.vram.robj);
102 return r; 98 return r;
103 } 99 r = radeon_bo_pin(rdev->gart.table.vram.robj,
104 r = radeon_object_kmap(rdev->gart.table.vram.robj, 100 RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
105 (void **)&rdev->gart.table.vram.ptr);
106 if (r) { 101 if (r) {
107 radeon_object_unpin(rdev->gart.table.vram.robj); 102 radeon_bo_unreserve(rdev->gart.table.vram.robj);
108 radeon_object_unref(&rdev->gart.table.vram.robj);
109 DRM_ERROR("radeon: failed to map gart vram table.\n");
110 return r; 103 return r;
111 } 104 }
105 r = radeon_bo_kmap(rdev->gart.table.vram.robj,
106 (void **)&rdev->gart.table.vram.ptr);
107 if (r)
108 radeon_bo_unpin(rdev->gart.table.vram.robj);
109 radeon_bo_unreserve(rdev->gart.table.vram.robj);
112 rdev->gart.table_addr = gpu_addr; 110 rdev->gart.table_addr = gpu_addr;
113 return 0; 111 return r;
114} 112}
115 113
116void radeon_gart_table_vram_free(struct radeon_device *rdev) 114void radeon_gart_table_vram_free(struct radeon_device *rdev)
117{ 115{
116 int r;
117
118 if (rdev->gart.table.vram.robj == NULL) { 118 if (rdev->gart.table.vram.robj == NULL) {
119 return; 119 return;
120 } 120 }
121 radeon_object_kunmap(rdev->gart.table.vram.robj); 121 r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
122 radeon_object_unpin(rdev->gart.table.vram.robj); 122 if (likely(r == 0)) {
123 radeon_object_unref(&rdev->gart.table.vram.robj); 123 radeon_bo_kunmap(rdev->gart.table.vram.robj);
124 radeon_bo_unpin(rdev->gart.table.vram.robj);
125 radeon_bo_unreserve(rdev->gart.table.vram.robj);
126 }
127 radeon_bo_unref(&rdev->gart.table.vram.robj);
124} 128}
125 129
126 130
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index d880edf254db..2944486871b0 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -38,22 +38,21 @@ int radeon_gem_object_init(struct drm_gem_object *obj)
38 38
39void radeon_gem_object_free(struct drm_gem_object *gobj) 39void radeon_gem_object_free(struct drm_gem_object *gobj)
40{ 40{
41 struct radeon_object *robj = gobj->driver_private; 41 struct radeon_bo *robj = gobj->driver_private;
42 42
43 gobj->driver_private = NULL; 43 gobj->driver_private = NULL;
44 if (robj) { 44 if (robj) {
45 radeon_object_unref(&robj); 45 radeon_bo_unref(&robj);
46 } 46 }
47} 47}
48 48
49int radeon_gem_object_create(struct radeon_device *rdev, int size, 49int radeon_gem_object_create(struct radeon_device *rdev, int size,
50 int alignment, int initial_domain, 50 int alignment, int initial_domain,
51 bool discardable, bool kernel, 51 bool discardable, bool kernel,
52 bool interruptible, 52 struct drm_gem_object **obj)
53 struct drm_gem_object **obj)
54{ 53{
55 struct drm_gem_object *gobj; 54 struct drm_gem_object *gobj;
56 struct radeon_object *robj; 55 struct radeon_bo *robj;
57 int r; 56 int r;
58 57
59 *obj = NULL; 58 *obj = NULL;
@@ -65,8 +64,7 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
65 if (alignment < PAGE_SIZE) { 64 if (alignment < PAGE_SIZE) {
66 alignment = PAGE_SIZE; 65 alignment = PAGE_SIZE;
67 } 66 }
68 r = radeon_object_create(rdev, gobj, size, kernel, initial_domain, 67 r = radeon_bo_create(rdev, gobj, size, kernel, initial_domain, &robj);
69 interruptible, &robj);
70 if (r) { 68 if (r) {
71 DRM_ERROR("Failed to allocate GEM object (%d, %d, %u)\n", 69 DRM_ERROR("Failed to allocate GEM object (%d, %d, %u)\n",
72 size, initial_domain, alignment); 70 size, initial_domain, alignment);
@@ -83,33 +81,33 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
83int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain, 81int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
84 uint64_t *gpu_addr) 82 uint64_t *gpu_addr)
85{ 83{
86 struct radeon_object *robj = obj->driver_private; 84 struct radeon_bo *robj = obj->driver_private;
87 uint32_t flags; 85 int r;
88 86
89 switch (pin_domain) { 87 r = radeon_bo_reserve(robj, false);
90 case RADEON_GEM_DOMAIN_VRAM: 88 if (unlikely(r != 0))
91 flags = TTM_PL_FLAG_VRAM; 89 return r;
92 break; 90 r = radeon_bo_pin(robj, pin_domain, gpu_addr);
93 case RADEON_GEM_DOMAIN_GTT: 91 radeon_bo_unreserve(robj);
94 flags = TTM_PL_FLAG_TT; 92 return r;
95 break;
96 default:
97 flags = TTM_PL_FLAG_SYSTEM;
98 break;
99 }
100 return radeon_object_pin(robj, flags, gpu_addr);
101} 93}
102 94
103void radeon_gem_object_unpin(struct drm_gem_object *obj) 95void radeon_gem_object_unpin(struct drm_gem_object *obj)
104{ 96{
105 struct radeon_object *robj = obj->driver_private; 97 struct radeon_bo *robj = obj->driver_private;
106 radeon_object_unpin(robj); 98 int r;
99
100 r = radeon_bo_reserve(robj, false);
101 if (likely(r == 0)) {
102 radeon_bo_unpin(robj);
103 radeon_bo_unreserve(robj);
104 }
107} 105}
108 106
109int radeon_gem_set_domain(struct drm_gem_object *gobj, 107int radeon_gem_set_domain(struct drm_gem_object *gobj,
110 uint32_t rdomain, uint32_t wdomain) 108 uint32_t rdomain, uint32_t wdomain)
111{ 109{
112 struct radeon_object *robj; 110 struct radeon_bo *robj;
113 uint32_t domain; 111 uint32_t domain;
114 int r; 112 int r;
115 113
@@ -127,11 +125,12 @@ int radeon_gem_set_domain(struct drm_gem_object *gobj,
127 } 125 }
128 if (domain == RADEON_GEM_DOMAIN_CPU) { 126 if (domain == RADEON_GEM_DOMAIN_CPU) {
 129 /* Asking for cpu access; wait for object idle */ 127 /* Asking for cpu access; wait for object idle */
130 r = radeon_object_wait(robj); 128 r = radeon_bo_wait(robj, NULL, false);
131 if (r) { 129 if (r) {
132 printk(KERN_ERR "Failed to wait for object !\n"); 130 printk(KERN_ERR "Failed to wait for object !\n");
133 return r; 131 return r;
134 } 132 }
133 radeon_hdp_flush(robj->rdev);
135 } 134 }
136 return 0; 135 return 0;
137} 136}
@@ -144,7 +143,7 @@ int radeon_gem_init(struct radeon_device *rdev)
144 143
145void radeon_gem_fini(struct radeon_device *rdev) 144void radeon_gem_fini(struct radeon_device *rdev)
146{ 145{
147 radeon_object_force_delete(rdev); 146 radeon_bo_force_delete(rdev);
148} 147}
149 148
150 149
@@ -158,9 +157,13 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
158 struct drm_radeon_gem_info *args = data; 157 struct drm_radeon_gem_info *args = data;
159 158
160 args->vram_size = rdev->mc.real_vram_size; 159 args->vram_size = rdev->mc.real_vram_size;
161 /* FIXME: report somethings that makes sense */ 160 args->vram_visible = rdev->mc.real_vram_size;
162 args->vram_visible = rdev->mc.real_vram_size - (4 * 1024 * 1024); 161 if (rdev->stollen_vga_memory)
163 args->gart_size = rdev->mc.gtt_size; 162 args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
163 if (rdev->fbdev_rbo)
164 args->vram_visible -= radeon_bo_size(rdev->fbdev_rbo);
165 args->gart_size = rdev->mc.gtt_size - rdev->cp.ring_size - 4096 -
166 RADEON_IB_POOL_SIZE*64*1024;
164 return 0; 167 return 0;
165} 168}
166 169
@@ -192,8 +195,8 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
192 /* create a gem object to contain this object in */ 195 /* create a gem object to contain this object in */
193 args->size = roundup(args->size, PAGE_SIZE); 196 args->size = roundup(args->size, PAGE_SIZE);
194 r = radeon_gem_object_create(rdev, args->size, args->alignment, 197 r = radeon_gem_object_create(rdev, args->size, args->alignment,
195 args->initial_domain, false, 198 args->initial_domain, false,
196 false, true, &gobj); 199 false, &gobj);
197 if (r) { 200 if (r) {
198 return r; 201 return r;
199 } 202 }
@@ -218,7 +221,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
218 * just validate the BO into a certain domain */ 221 * just validate the BO into a certain domain */
219 struct drm_radeon_gem_set_domain *args = data; 222 struct drm_radeon_gem_set_domain *args = data;
220 struct drm_gem_object *gobj; 223 struct drm_gem_object *gobj;
221 struct radeon_object *robj; 224 struct radeon_bo *robj;
222 int r; 225 int r;
223 226
224 /* for now if someone requests domain CPU - 227 /* for now if someone requests domain CPU -
@@ -244,19 +247,18 @@ int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
244{ 247{
245 struct drm_radeon_gem_mmap *args = data; 248 struct drm_radeon_gem_mmap *args = data;
246 struct drm_gem_object *gobj; 249 struct drm_gem_object *gobj;
247 struct radeon_object *robj; 250 struct radeon_bo *robj;
248 int r;
249 251
250 gobj = drm_gem_object_lookup(dev, filp, args->handle); 252 gobj = drm_gem_object_lookup(dev, filp, args->handle);
251 if (gobj == NULL) { 253 if (gobj == NULL) {
252 return -EINVAL; 254 return -EINVAL;
253 } 255 }
254 robj = gobj->driver_private; 256 robj = gobj->driver_private;
255 r = radeon_object_mmap(robj, &args->addr_ptr); 257 args->addr_ptr = radeon_bo_mmap_offset(robj);
256 mutex_lock(&dev->struct_mutex); 258 mutex_lock(&dev->struct_mutex);
257 drm_gem_object_unreference(gobj); 259 drm_gem_object_unreference(gobj);
258 mutex_unlock(&dev->struct_mutex); 260 mutex_unlock(&dev->struct_mutex);
259 return r; 261 return 0;
260} 262}
261 263
262int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, 264int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
@@ -264,16 +266,16 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
264{ 266{
265 struct drm_radeon_gem_busy *args = data; 267 struct drm_radeon_gem_busy *args = data;
266 struct drm_gem_object *gobj; 268 struct drm_gem_object *gobj;
267 struct radeon_object *robj; 269 struct radeon_bo *robj;
268 int r; 270 int r;
269 uint32_t cur_placement; 271 uint32_t cur_placement = 0;
270 272
271 gobj = drm_gem_object_lookup(dev, filp, args->handle); 273 gobj = drm_gem_object_lookup(dev, filp, args->handle);
272 if (gobj == NULL) { 274 if (gobj == NULL) {
273 return -EINVAL; 275 return -EINVAL;
274 } 276 }
275 robj = gobj->driver_private; 277 robj = gobj->driver_private;
276 r = radeon_object_busy_domain(robj, &cur_placement); 278 r = radeon_bo_wait(robj, &cur_placement, true);
277 switch (cur_placement) { 279 switch (cur_placement) {
278 case TTM_PL_VRAM: 280 case TTM_PL_VRAM:
279 args->domain = RADEON_GEM_DOMAIN_VRAM; 281 args->domain = RADEON_GEM_DOMAIN_VRAM;
@@ -297,7 +299,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
297{ 299{
298 struct drm_radeon_gem_wait_idle *args = data; 300 struct drm_radeon_gem_wait_idle *args = data;
299 struct drm_gem_object *gobj; 301 struct drm_gem_object *gobj;
300 struct radeon_object *robj; 302 struct radeon_bo *robj;
301 int r; 303 int r;
302 304
303 gobj = drm_gem_object_lookup(dev, filp, args->handle); 305 gobj = drm_gem_object_lookup(dev, filp, args->handle);
@@ -305,10 +307,11 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
305 return -EINVAL; 307 return -EINVAL;
306 } 308 }
307 robj = gobj->driver_private; 309 robj = gobj->driver_private;
308 r = radeon_object_wait(robj); 310 r = radeon_bo_wait(robj, NULL, false);
309 mutex_lock(&dev->struct_mutex); 311 mutex_lock(&dev->struct_mutex);
310 drm_gem_object_unreference(gobj); 312 drm_gem_object_unreference(gobj);
311 mutex_unlock(&dev->struct_mutex); 313 mutex_unlock(&dev->struct_mutex);
314 radeon_hdp_flush(robj->rdev);
312 return r; 315 return r;
313} 316}
314 317
@@ -317,7 +320,7 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
317{ 320{
318 struct drm_radeon_gem_set_tiling *args = data; 321 struct drm_radeon_gem_set_tiling *args = data;
319 struct drm_gem_object *gobj; 322 struct drm_gem_object *gobj;
320 struct radeon_object *robj; 323 struct radeon_bo *robj;
321 int r = 0; 324 int r = 0;
322 325
323 DRM_DEBUG("%d \n", args->handle); 326 DRM_DEBUG("%d \n", args->handle);
@@ -325,7 +328,7 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
325 if (gobj == NULL) 328 if (gobj == NULL)
326 return -EINVAL; 329 return -EINVAL;
327 robj = gobj->driver_private; 330 robj = gobj->driver_private;
328 radeon_object_set_tiling_flags(robj, args->tiling_flags, args->pitch); 331 r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
329 mutex_lock(&dev->struct_mutex); 332 mutex_lock(&dev->struct_mutex);
330 drm_gem_object_unreference(gobj); 333 drm_gem_object_unreference(gobj);
331 mutex_unlock(&dev->struct_mutex); 334 mutex_unlock(&dev->struct_mutex);
@@ -337,16 +340,19 @@ int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
337{ 340{
338 struct drm_radeon_gem_get_tiling *args = data; 341 struct drm_radeon_gem_get_tiling *args = data;
339 struct drm_gem_object *gobj; 342 struct drm_gem_object *gobj;
340 struct radeon_object *robj; 343 struct radeon_bo *rbo;
341 int r = 0; 344 int r = 0;
342 345
343 DRM_DEBUG("\n"); 346 DRM_DEBUG("\n");
344 gobj = drm_gem_object_lookup(dev, filp, args->handle); 347 gobj = drm_gem_object_lookup(dev, filp, args->handle);
345 if (gobj == NULL) 348 if (gobj == NULL)
346 return -EINVAL; 349 return -EINVAL;
347 robj = gobj->driver_private; 350 rbo = gobj->driver_private;
348 radeon_object_get_tiling_flags(robj, &args->tiling_flags, 351 r = radeon_bo_reserve(rbo, false);
349 &args->pitch); 352 if (unlikely(r != 0))
353 return r;
354 radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
355 radeon_bo_unreserve(rbo);
350 mutex_lock(&dev->struct_mutex); 356 mutex_lock(&dev->struct_mutex);
351 drm_gem_object_unreference(gobj); 357 drm_gem_object_unreference(gobj);
352 mutex_unlock(&dev->struct_mutex); 358 mutex_unlock(&dev->struct_mutex);
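The reworked info ioctl above stops hardcoding a 4 MiB VRAM reservation and instead subtracts the actual kernel consumers. As a worked example with assumed sizes (256 MiB GTT, 1 MiB CP ring, RADEON_IB_POOL_SIZE of 16), userspace would see gart_size = 256 MiB - 1 MiB - 4 KiB - 16*64 KiB, i.e. roughly 254 MiB; vram_visible is likewise real VRAM minus the stolen VGA buffer and the fbdev scanout buffer, when those exist.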
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index dd438d32e5c0..da3da1e89d00 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -59,35 +59,43 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
59} 59}
60 60
61 61
62void radeon_i2c_do_lock(struct radeon_connector *radeon_connector, int lock_state) 62void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state)
63{ 63{
64 struct radeon_device *rdev = radeon_connector->base.dev->dev_private; 64 struct radeon_device *rdev = i2c->dev->dev_private;
65 struct radeon_i2c_bus_rec *rec = &i2c->rec;
65 uint32_t temp; 66 uint32_t temp;
66 struct radeon_i2c_bus_rec *rec = &radeon_connector->ddc_bus->rec;
67 67
68 /* RV410 appears to have a bug where the hw i2c in reset 68 /* RV410 appears to have a bug where the hw i2c in reset
69 * holds the i2c port in a bad state - switch hw i2c away before 69 * holds the i2c port in a bad state - switch hw i2c away before
 70 * doing DDC - do this for all r200s/r300s/r400s for safety's sake 70 * doing DDC - do this for all r200s/r300s/r400s for safety's sake
71 */ 71 */
72 if ((rdev->family >= CHIP_R200) && !ASIC_IS_AVIVO(rdev)) { 72 if (rec->hw_capable) {
73 if (rec->a_clk_reg == RADEON_GPIO_MONID) { 73 if ((rdev->family >= CHIP_R200) && !ASIC_IS_AVIVO(rdev)) {
74 WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST | 74 if (rec->a_clk_reg == RADEON_GPIO_MONID) {
75 R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1))); 75 WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST |
76 } else { 76 R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1)));
77 WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST | 77 } else {
78 R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3))); 78 WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST |
79 R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3)));
80 }
79 } 81 }
80 } 82 }
81 if (lock_state) {
82 temp = RREG32(rec->a_clk_reg);
83 temp &= ~(rec->a_clk_mask);
84 WREG32(rec->a_clk_reg, temp);
85
86 temp = RREG32(rec->a_data_reg);
87 temp &= ~(rec->a_data_mask);
88 WREG32(rec->a_data_reg, temp);
89 }
90 83
84 /* clear the output pin values */
85 temp = RREG32(rec->a_clk_reg) & ~rec->a_clk_mask;
86 WREG32(rec->a_clk_reg, temp);
87
88 temp = RREG32(rec->a_data_reg) & ~rec->a_data_mask;
89 WREG32(rec->a_data_reg, temp);
90
91 /* set the pins to input */
92 temp = RREG32(rec->en_clk_reg) & ~rec->en_clk_mask;
93 WREG32(rec->en_clk_reg, temp);
94
95 temp = RREG32(rec->en_data_reg) & ~rec->en_data_mask;
96 WREG32(rec->en_data_reg, temp);
97
98 /* mask the gpio pins for software use */
91 temp = RREG32(rec->mask_clk_reg); 99 temp = RREG32(rec->mask_clk_reg);
92 if (lock_state) 100 if (lock_state)
93 temp |= rec->mask_clk_mask; 101 temp |= rec->mask_clk_mask;
@@ -112,8 +120,9 @@ static int get_clock(void *i2c_priv)
112 struct radeon_i2c_bus_rec *rec = &i2c->rec; 120 struct radeon_i2c_bus_rec *rec = &i2c->rec;
113 uint32_t val; 121 uint32_t val;
114 122
115 val = RREG32(rec->get_clk_reg); 123 /* read the value off the pin */
116 val &= rec->get_clk_mask; 124 val = RREG32(rec->y_clk_reg);
125 val &= rec->y_clk_mask;
117 126
118 return (val != 0); 127 return (val != 0);
119} 128}
@@ -126,8 +135,10 @@ static int get_data(void *i2c_priv)
126 struct radeon_i2c_bus_rec *rec = &i2c->rec; 135 struct radeon_i2c_bus_rec *rec = &i2c->rec;
127 uint32_t val; 136 uint32_t val;
128 137
129 val = RREG32(rec->get_data_reg); 138 /* read the value off the pin */
130 val &= rec->get_data_mask; 139 val = RREG32(rec->y_data_reg);
140 val &= rec->y_data_mask;
141
131 return (val != 0); 142 return (val != 0);
132} 143}
133 144
@@ -138,9 +149,10 @@ static void set_clock(void *i2c_priv, int clock)
138 struct radeon_i2c_bus_rec *rec = &i2c->rec; 149 struct radeon_i2c_bus_rec *rec = &i2c->rec;
139 uint32_t val; 150 uint32_t val;
140 151
141 val = RREG32(rec->put_clk_reg) & (uint32_t)~(rec->put_clk_mask); 152 /* set pin direction */
142 val |= clock ? 0 : rec->put_clk_mask; 153 val = RREG32(rec->en_clk_reg) & ~rec->en_clk_mask;
143 WREG32(rec->put_clk_reg, val); 154 val |= clock ? 0 : rec->en_clk_mask;
155 WREG32(rec->en_clk_reg, val);
144} 156}
145 157
146static void set_data(void *i2c_priv, int data) 158static void set_data(void *i2c_priv, int data)
@@ -150,14 +162,15 @@ static void set_data(void *i2c_priv, int data)
150 struct radeon_i2c_bus_rec *rec = &i2c->rec; 162 struct radeon_i2c_bus_rec *rec = &i2c->rec;
151 uint32_t val; 163 uint32_t val;
152 164
153 val = RREG32(rec->put_data_reg) & (uint32_t)~(rec->put_data_mask); 165 /* set pin direction */
154 val |= data ? 0 : rec->put_data_mask; 166 val = RREG32(rec->en_data_reg) & ~rec->en_data_mask;
155 WREG32(rec->put_data_reg, val); 167 val |= data ? 0 : rec->en_data_mask;
168 WREG32(rec->en_data_reg, val);
156} 169}
157 170
158struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, 171struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
159 struct radeon_i2c_bus_rec *rec, 172 struct radeon_i2c_bus_rec *rec,
160 const char *name) 173 const char *name)
161{ 174{
162 struct radeon_i2c_chan *i2c; 175 struct radeon_i2c_chan *i2c;
163 int ret; 176 int ret;
@@ -167,20 +180,19 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
167 return NULL; 180 return NULL;
168 181
169 i2c->adapter.owner = THIS_MODULE; 182 i2c->adapter.owner = THIS_MODULE;
170 i2c->adapter.algo_data = &i2c->algo;
171 i2c->dev = dev; 183 i2c->dev = dev;
172 i2c->algo.setsda = set_data; 184 i2c_set_adapdata(&i2c->adapter, i2c);
173 i2c->algo.setscl = set_clock; 185 i2c->adapter.algo_data = &i2c->algo.bit;
174 i2c->algo.getsda = get_data; 186 i2c->algo.bit.setsda = set_data;
175 i2c->algo.getscl = get_clock; 187 i2c->algo.bit.setscl = set_clock;
176 i2c->algo.udelay = 20; 188 i2c->algo.bit.getsda = get_data;
189 i2c->algo.bit.getscl = get_clock;
190 i2c->algo.bit.udelay = 20;
177 /* vesa says 2.2 ms is enough, 1 jiffy doesn't seem to always 191 /* vesa says 2.2 ms is enough, 1 jiffy doesn't seem to always
178 * make this, 2 jiffies is a lot more reliable */ 192 * make this, 2 jiffies is a lot more reliable */
179 i2c->algo.timeout = 2; 193 i2c->algo.bit.timeout = 2;
180 i2c->algo.data = i2c; 194 i2c->algo.bit.data = i2c;
181 i2c->rec = *rec; 195 i2c->rec = *rec;
182 i2c_set_adapdata(&i2c->adapter, i2c);
183
184 ret = i2c_bit_add_bus(&i2c->adapter); 196 ret = i2c_bit_add_bus(&i2c->adapter);
185 if (ret) { 197 if (ret) {
186 DRM_INFO("Failed to register i2c %s\n", name); 198 DRM_INFO("Failed to register i2c %s\n", name);
@@ -194,6 +206,38 @@ out_free:
194 206
195} 207}
196 208
209struct radeon_i2c_chan *radeon_i2c_create_dp(struct drm_device *dev,
210 struct radeon_i2c_bus_rec *rec,
211 const char *name)
212{
213 struct radeon_i2c_chan *i2c;
214 int ret;
215
216 i2c = kzalloc(sizeof(struct radeon_i2c_chan), GFP_KERNEL);
217 if (i2c == NULL)
218 return NULL;
219
220 i2c->rec = *rec;
221 i2c->adapter.owner = THIS_MODULE;
222 i2c->dev = dev;
223 i2c_set_adapdata(&i2c->adapter, i2c);
224 i2c->adapter.algo_data = &i2c->algo.dp;
225 i2c->algo.dp.aux_ch = radeon_dp_i2c_aux_ch;
226 i2c->algo.dp.address = 0;
227 ret = i2c_dp_aux_add_bus(&i2c->adapter);
228 if (ret) {
229 DRM_INFO("Failed to register i2c %s\n", name);
230 goto out_free;
231 }
232
233 return i2c;
234out_free:
235 kfree(i2c);
236 return NULL;
237
238}
239
240
197void radeon_i2c_destroy(struct radeon_i2c_chan *i2c) 241void radeon_i2c_destroy(struct radeon_i2c_chan *i2c)
198{ 242{
199 if (!i2c) 243 if (!i2c)
@@ -207,3 +251,59 @@ struct drm_encoder *radeon_best_encoder(struct drm_connector *connector)
207{ 251{
208 return NULL; 252 return NULL;
209} 253}
254
255void radeon_i2c_sw_get_byte(struct radeon_i2c_chan *i2c_bus,
256 u8 slave_addr,
257 u8 addr,
258 u8 *val)
259{
260 u8 out_buf[2];
261 u8 in_buf[2];
262 struct i2c_msg msgs[] = {
263 {
264 .addr = slave_addr,
265 .flags = 0,
266 .len = 1,
267 .buf = out_buf,
268 },
269 {
270 .addr = slave_addr,
271 .flags = I2C_M_RD,
272 .len = 1,
273 .buf = in_buf,
274 }
275 };
276
277 out_buf[0] = addr;
278 out_buf[1] = 0;
279
280 if (i2c_transfer(&i2c_bus->adapter, msgs, 2) == 2) {
281 *val = in_buf[0];
282 DRM_DEBUG("val = 0x%02x\n", *val);
283 } else {
284 DRM_ERROR("i2c 0x%02x 0x%02x read failed\n",
285 addr, *val);
286 }
287}
288
289void radeon_i2c_sw_put_byte(struct radeon_i2c_chan *i2c_bus,
290 u8 slave_addr,
291 u8 addr,
292 u8 val)
293{
294 uint8_t out_buf[2];
295 struct i2c_msg msg = {
296 .addr = slave_addr,
297 .flags = 0,
298 .len = 2,
299 .buf = out_buf,
300 };
301
302 out_buf[0] = addr;
303 out_buf[1] = val;
304
305 if (i2c_transfer(&i2c_bus->adapter, &msg, 1) != 1)
306 DRM_ERROR("i2c 0x%02x 0x%02x write failed\n",
307 addr, val);
308}
309
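The new radeon_i2c_sw_put_byte()/radeon_i2c_sw_get_byte() helpers do a register-style write and read over any bit-banged bus created above. A short usage sketch; the slave and register addresses are made up for illustration:

        u8 val;

        /* write 0xab into register 0x04 of the 7-bit slave at 0x50 */
        radeon_i2c_sw_put_byte(i2c_bus, 0x50, 0x04, 0xab);
        /* read the same register back */
        radeon_i2c_sw_get_byte(i2c_bus, 0x50, 0x04, &val);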
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index a0fe6232dcb6..9223296fe37b 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -39,11 +39,32 @@ irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS)
39 return radeon_irq_process(rdev); 39 return radeon_irq_process(rdev);
40} 40}
41 41
42/*
43 * Handle hotplug events outside the interrupt handler proper.
44 */
45static void radeon_hotplug_work_func(struct work_struct *work)
46{
47 struct radeon_device *rdev = container_of(work, struct radeon_device,
48 hotplug_work);
49 struct drm_device *dev = rdev->ddev;
50 struct drm_mode_config *mode_config = &dev->mode_config;
51 struct drm_connector *connector;
52
53 if (mode_config->num_connector) {
54 list_for_each_entry(connector, &mode_config->connector_list, head)
55 radeon_connector_hotplug(connector);
56 }
57 /* Just fire off a uevent and let userspace tell us what to do */
58 drm_sysfs_hotplug_event(dev);
59}
60
42void radeon_driver_irq_preinstall_kms(struct drm_device *dev) 61void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
43{ 62{
44 struct radeon_device *rdev = dev->dev_private; 63 struct radeon_device *rdev = dev->dev_private;
45 unsigned i; 64 unsigned i;
46 65
66 INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
67
47 /* Disable *all* interrupts */ 68 /* Disable *all* interrupts */
48 rdev->irq.sw_int = false; 69 rdev->irq.sw_int = false;
49 for (i = 0; i < 2; i++) { 70 for (i = 0; i < 2; i++) {
@@ -87,17 +108,25 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
87 108
88 if (rdev->flags & RADEON_SINGLE_CRTC) 109 if (rdev->flags & RADEON_SINGLE_CRTC)
89 num_crtc = 1; 110 num_crtc = 1;
90 111 spin_lock_init(&rdev->irq.sw_lock);
91 r = drm_vblank_init(rdev->ddev, num_crtc); 112 r = drm_vblank_init(rdev->ddev, num_crtc);
92 if (r) { 113 if (r) {
93 return r; 114 return r;
94 } 115 }
95 /* enable msi */ 116 /* enable msi */
96 rdev->msi_enabled = 0; 117 rdev->msi_enabled = 0;
97 if (rdev->family >= CHIP_RV380) { 118 /* MSIs don't seem to work on my rs780;
119 * not sure about rs880 or other rs780s.
120 * Needs more investigation.
121 */
122 if ((rdev->family >= CHIP_RV380) &&
123 (rdev->family != CHIP_RS780) &&
124 (rdev->family != CHIP_RS880)) {
98 int ret = pci_enable_msi(rdev->pdev); 125 int ret = pci_enable_msi(rdev->pdev);
99 if (!ret) 126 if (!ret) {
100 rdev->msi_enabled = 1; 127 rdev->msi_enabled = 1;
128 DRM_INFO("radeon: using MSI.\n");
129 }
101 } 130 }
102 drm_irq_install(rdev->ddev); 131 drm_irq_install(rdev->ddev);
103 rdev->irq.installed = true; 132 rdev->irq.installed = true;
@@ -114,3 +143,29 @@ void radeon_irq_kms_fini(struct radeon_device *rdev)
114 pci_disable_msi(rdev->pdev); 143 pci_disable_msi(rdev->pdev);
115 } 144 }
116} 145}
146
147void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev)
148{
149 unsigned long irqflags;
150
151 spin_lock_irqsave(&rdev->irq.sw_lock, irqflags);
152 if (rdev->ddev->irq_enabled && (++rdev->irq.sw_refcount == 1)) {
153 rdev->irq.sw_int = true;
154 radeon_irq_set(rdev);
155 }
156 spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags);
157}
158
159void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev)
160{
161 unsigned long irqflags;
162
163 spin_lock_irqsave(&rdev->irq.sw_lock, irqflags);
164 BUG_ON(rdev->ddev->irq_enabled && rdev->irq.sw_refcount <= 0);
165 if (rdev->ddev->irq_enabled && (--rdev->irq.sw_refcount == 0)) {
166 rdev->irq.sw_int = false;
167 radeon_irq_set(rdev);
168 }
169 spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags);
170}
171
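These helpers refcount the software interrupt so that concurrent waiters cannot disable it under one another: only the first get enables sw_int, and only the last put disables it again. The intended bracketing, as radeon_fence_wait() now does it in the radeon_fence.c hunk earlier in this series:

        radeon_irq_kms_sw_irq_get(rdev);        /* refcount 0 -> 1 enables sw_int */
        r = wait_event_timeout(rdev->fence_drv.queue,
                               radeon_fence_signaled(fence), timeout);
        radeon_irq_kms_sw_irq_put(rdev);        /* refcount 1 -> 0 disables it */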
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index ba128621057a..f23b05606eb5 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -30,10 +30,19 @@
30#include "radeon.h" 30#include "radeon.h"
31#include "radeon_drm.h" 31#include "radeon_drm.h"
32 32
33int radeon_driver_unload_kms(struct drm_device *dev)
34{
35 struct radeon_device *rdev = dev->dev_private;
36
37 if (rdev == NULL)
38 return 0;
39 radeon_modeset_fini(rdev);
40 radeon_device_fini(rdev);
41 kfree(rdev);
42 dev->dev_private = NULL;
43 return 0;
44}
33 45
34/*
35 * Driver load/unload
36 */
37int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags) 46int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
38{ 47{
39 struct radeon_device *rdev; 48 struct radeon_device *rdev;
@@ -62,31 +71,20 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
62 */ 71 */
63 r = radeon_device_init(rdev, dev, dev->pdev, flags); 72 r = radeon_device_init(rdev, dev, dev->pdev, flags);
64 if (r) { 73 if (r) {
65 DRM_ERROR("Fatal error while trying to initialize radeon.\n"); 74 dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
66 return r; 75 goto out;
67 } 76 }
68 /* Again modeset_init should fail only on fatal error 77 /* Again modeset_init should fail only on fatal error
69 * otherwise it should provide enough functionalities 78 * otherwise it should provide enough functionalities
70 * for shadowfb to run 79 * for shadowfb to run
71 */ 80 */
72 r = radeon_modeset_init(rdev); 81 r = radeon_modeset_init(rdev);
73 if (r) { 82 if (r)
74 return r; 83 dev_err(&dev->pdev->dev, "Fatal error during modeset init\n");
75 } 84out:
76 return 0; 85 if (r)
77} 86 radeon_driver_unload_kms(dev);
78 87 return r;
79int radeon_driver_unload_kms(struct drm_device *dev)
80{
81 struct radeon_device *rdev = dev->dev_private;
82
83 if (rdev == NULL)
84 return 0;
85 radeon_modeset_fini(rdev);
86 radeon_device_fini(rdev);
87 kfree(rdev);
88 dev->dev_private = NULL;
89 return 0;
90} 88}
91 89
92 90
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 8d0b7aa87fa4..b82ede98e152 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -30,6 +30,18 @@
30#include "radeon.h" 30#include "radeon.h"
31#include "atom.h" 31#include "atom.h"
32 32
33static void radeon_overscan_setup(struct drm_crtc *crtc,
34 struct drm_display_mode *mode)
35{
36 struct drm_device *dev = crtc->dev;
37 struct radeon_device *rdev = dev->dev_private;
38 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
39
40 WREG32(RADEON_OVR_CLR + radeon_crtc->crtc_offset, 0);
41 WREG32(RADEON_OVR_WID_LEFT_RIGHT + radeon_crtc->crtc_offset, 0);
42 WREG32(RADEON_OVR_WID_TOP_BOTTOM + radeon_crtc->crtc_offset, 0);
43}
44
33static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc, 45static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc,
34 struct drm_display_mode *mode, 46 struct drm_display_mode *mode,
35 struct drm_display_mode *adjusted_mode) 47 struct drm_display_mode *adjusted_mode)
@@ -292,8 +304,7 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
292 uint32_t mask; 304 uint32_t mask;
293 305
294 if (radeon_crtc->crtc_id) 306 if (radeon_crtc->crtc_id)
295 mask = (RADEON_CRTC2_EN | 307 mask = (RADEON_CRTC2_DISP_DIS |
296 RADEON_CRTC2_DISP_DIS |
297 RADEON_CRTC2_VSYNC_DIS | 308 RADEON_CRTC2_VSYNC_DIS |
298 RADEON_CRTC2_HSYNC_DIS | 309 RADEON_CRTC2_HSYNC_DIS |
299 RADEON_CRTC2_DISP_REQ_EN_B); 310 RADEON_CRTC2_DISP_REQ_EN_B);
@@ -305,7 +316,7 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
305 switch (mode) { 316 switch (mode) {
306 case DRM_MODE_DPMS_ON: 317 case DRM_MODE_DPMS_ON:
307 if (radeon_crtc->crtc_id) 318 if (radeon_crtc->crtc_id)
308 WREG32_P(RADEON_CRTC2_GEN_CNTL, RADEON_CRTC2_EN, ~mask); 319 WREG32_P(RADEON_CRTC2_GEN_CNTL, RADEON_CRTC2_EN, ~(RADEON_CRTC2_EN | mask));
309 else { 320 else {
310 WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_EN, ~(RADEON_CRTC_EN | 321 WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_EN, ~(RADEON_CRTC_EN |
311 RADEON_CRTC_DISP_REQ_EN_B)); 322 RADEON_CRTC_DISP_REQ_EN_B));
@@ -319,7 +330,7 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
319 case DRM_MODE_DPMS_OFF: 330 case DRM_MODE_DPMS_OFF:
320 drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id); 331 drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
321 if (radeon_crtc->crtc_id) 332 if (radeon_crtc->crtc_id)
322 WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~mask); 333 WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~(RADEON_CRTC2_EN | mask));
323 else { 334 else {
324 WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_DISP_REQ_EN_B, ~(RADEON_CRTC_EN | 335 WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_DISP_REQ_EN_B, ~(RADEON_CRTC_EN |
325 RADEON_CRTC_DISP_REQ_EN_B)); 336 RADEON_CRTC_DISP_REQ_EN_B));
@@ -400,14 +411,21 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
400 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 411 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
401 struct radeon_framebuffer *radeon_fb; 412 struct radeon_framebuffer *radeon_fb;
402 struct drm_gem_object *obj; 413 struct drm_gem_object *obj;
414 struct radeon_bo *rbo;
403 uint64_t base; 415 uint64_t base;
404 uint32_t crtc_offset, crtc_offset_cntl, crtc_tile_x0_y0 = 0; 416 uint32_t crtc_offset, crtc_offset_cntl, crtc_tile_x0_y0 = 0;
405 uint32_t crtc_pitch, pitch_pixels; 417 uint32_t crtc_pitch, pitch_pixels;
406 uint32_t tiling_flags; 418 uint32_t tiling_flags;
407 int format; 419 int format;
408 uint32_t gen_cntl_reg, gen_cntl_val; 420 uint32_t gen_cntl_reg, gen_cntl_val;
421 int r;
409 422
410 DRM_DEBUG("\n"); 423 DRM_DEBUG("\n");
424 /* no fb bound */
425 if (!crtc->fb) {
426 DRM_DEBUG("No FB bound\n");
427 return 0;
428 }
411 429
412 radeon_fb = to_radeon_framebuffer(crtc->fb); 430 radeon_fb = to_radeon_framebuffer(crtc->fb);
413 431
@@ -431,10 +449,22 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
431 return false; 449 return false;
432 } 450 }
433 451
 452 /* Pin framebuffer & get tiling information */
434 obj = radeon_fb->obj; 453 obj = radeon_fb->obj;
435 if (radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &base)) { 454 rbo = obj->driver_private;
455 r = radeon_bo_reserve(rbo, false);
456 if (unlikely(r != 0))
457 return r;
458 r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &base);
459 if (unlikely(r != 0)) {
460 radeon_bo_unreserve(rbo);
436 return -EINVAL; 461 return -EINVAL;
437 } 462 }
463 radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
464 radeon_bo_unreserve(rbo);
465 if (tiling_flags & RADEON_TILING_MICRO)
466 DRM_ERROR("trying to scanout microtiled buffer\n");
467
438 /* if scanout was in GTT this really wouldn't work */ 468 /* if scanout was in GTT this really wouldn't work */
439 /* crtc offset is from display base addr not FB location */ 469 /* crtc offset is from display base addr not FB location */
440 radeon_crtc->legacy_display_base_addr = rdev->mc.vram_location; 470 radeon_crtc->legacy_display_base_addr = rdev->mc.vram_location;
@@ -449,10 +479,6 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
449 (crtc->fb->bits_per_pixel * 8)); 479 (crtc->fb->bits_per_pixel * 8));
450 crtc_pitch |= crtc_pitch << 16; 480 crtc_pitch |= crtc_pitch << 16;
451 481
452 radeon_object_get_tiling_flags(obj->driver_private,
453 &tiling_flags, NULL);
454 if (tiling_flags & RADEON_TILING_MICRO)
455 DRM_ERROR("trying to scanout microtiled buffer\n");
456 482
457 if (tiling_flags & RADEON_TILING_MACRO) { 483 if (tiling_flags & RADEON_TILING_MACRO) {
458 if (ASIC_IS_R300(rdev)) 484 if (ASIC_IS_R300(rdev))
@@ -530,7 +556,12 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
530 556
531 if (old_fb && old_fb != crtc->fb) { 557 if (old_fb && old_fb != crtc->fb) {
532 radeon_fb = to_radeon_framebuffer(old_fb); 558 radeon_fb = to_radeon_framebuffer(old_fb);
533 radeon_gem_object_unpin(radeon_fb->obj); 559 rbo = radeon_fb->obj->driver_private;
560 r = radeon_bo_reserve(rbo, false);
561 if (unlikely(r != 0))
562 return r;
563 radeon_bo_unpin(rbo);
564 radeon_bo_unreserve(rbo);
534 } 565 }
535 566
536 /* Bytes per pixel may have changed */ 567 /* Bytes per pixel may have changed */
@@ -642,12 +673,8 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod
642 uint32_t crtc2_gen_cntl; 673 uint32_t crtc2_gen_cntl;
643 uint32_t disp2_merge_cntl; 674 uint32_t disp2_merge_cntl;
644 675
 645 /* check to see if TV DAC is enabled for another crtc and keep it enabled */ 676 /* if TV DAC is enabled for another crtc, keep it enabled */
646 if (RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_CRT2_ON) 677 crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL) & 0x00718080;
647 crtc2_gen_cntl = RADEON_CRTC2_CRT2_ON;
648 else
649 crtc2_gen_cntl = 0;
650
651 crtc2_gen_cntl |= ((format << 8) 678 crtc2_gen_cntl |= ((format << 8)
652 | RADEON_CRTC2_VSYNC_DIS 679 | RADEON_CRTC2_VSYNC_DIS
653 | RADEON_CRTC2_HSYNC_DIS 680 | RADEON_CRTC2_HSYNC_DIS
@@ -676,7 +703,8 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod
676 uint32_t crtc_ext_cntl; 703 uint32_t crtc_ext_cntl;
677 uint32_t disp_merge_cntl; 704 uint32_t disp_merge_cntl;
678 705
679 crtc_gen_cntl = (RADEON_CRTC_EXT_DISP_EN 706 crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL) & 0x00718000;
707 crtc_gen_cntl |= (RADEON_CRTC_EXT_DISP_EN
680 | (format << 8) 708 | (format << 8)
681 | RADEON_CRTC_DISP_REQ_EN_B 709 | RADEON_CRTC_DISP_REQ_EN_B
682 | ((mode->flags & DRM_MODE_FLAG_DBLSCAN) 710 | ((mode->flags & DRM_MODE_FLAG_DBLSCAN)
@@ -779,15 +807,17 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
779 if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) 807 if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
780 pll_flags |= RADEON_PLL_NO_ODD_POST_DIV; 808 pll_flags |= RADEON_PLL_NO_ODD_POST_DIV;
781 if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) { 809 if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) {
782 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 810 if (!rdev->is_atom_bios) {
783 struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv; 811 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
784 if (lvds) { 812 struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv;
785 if (lvds->use_bios_dividers) { 813 if (lvds) {
786 pll_ref_div = lvds->panel_ref_divider; 814 if (lvds->use_bios_dividers) {
787 pll_fb_post_div = (lvds->panel_fb_divider | 815 pll_ref_div = lvds->panel_ref_divider;
788 (lvds->panel_post_divider << 16)); 816 pll_fb_post_div = (lvds->panel_fb_divider |
789 htotal_cntl = 0; 817 (lvds->panel_post_divider << 16));
790 use_bios_divs = true; 818 htotal_cntl = 0;
819 use_bios_divs = true;
820 }
791 } 821 }
792 } 822 }
793 pll_flags |= RADEON_PLL_USE_REF_DIV; 823 pll_flags |= RADEON_PLL_USE_REF_DIV;
@@ -1027,6 +1057,7 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc,
1027 radeon_crtc_set_base(crtc, x, y, old_fb); 1057 radeon_crtc_set_base(crtc, x, y, old_fb);
1028 radeon_set_crtc_timing(crtc, adjusted_mode); 1058 radeon_set_crtc_timing(crtc, adjusted_mode);
1029 radeon_set_pll(crtc, adjusted_mode); 1059 radeon_set_pll(crtc, adjusted_mode);
1060 radeon_overscan_setup(crtc, adjusted_mode);
1030 if (radeon_crtc->crtc_id == 0) { 1061 if (radeon_crtc->crtc_id == 0) {
1031 radeon_legacy_rmx_mode_set(crtc, mode, adjusted_mode); 1062 radeon_legacy_rmx_mode_set(crtc, mode, adjusted_mode);
1032 } else { 1063 } else {
@@ -1042,12 +1073,29 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc,
1042 1073
1043static void radeon_crtc_prepare(struct drm_crtc *crtc) 1074static void radeon_crtc_prepare(struct drm_crtc *crtc)
1044{ 1075{
1045 radeon_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); 1076 struct drm_device *dev = crtc->dev;
1077 struct drm_crtc *crtci;
1078
1079 /*
1080 * The hardware wedges sometimes if you reconfigure one CRTC
1081 * whilst another is running (see fdo bug #24611).
1082 */
1083 list_for_each_entry(crtci, &dev->mode_config.crtc_list, head)
1084 radeon_crtc_dpms(crtci, DRM_MODE_DPMS_OFF);
1046} 1085}
1047 1086
1048static void radeon_crtc_commit(struct drm_crtc *crtc) 1087static void radeon_crtc_commit(struct drm_crtc *crtc)
1049{ 1088{
1050 radeon_crtc_dpms(crtc, DRM_MODE_DPMS_ON); 1089 struct drm_device *dev = crtc->dev;
1090 struct drm_crtc *crtci;
1091
1092 /*
1093 * Reenable the CRTCs that should be running.
1094 */
1095 list_for_each_entry(crtci, &dev->mode_config.crtc_list, head) {
1096 if (crtci->enabled)
1097 radeon_crtc_dpms(crtci, DRM_MODE_DPMS_ON);
1098 }
1051} 1099}
1052 1100
1053static const struct drm_crtc_helper_funcs legacy_helper_funcs = { 1101static const struct drm_crtc_helper_funcs legacy_helper_funcs = {
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 00382122869b..df00515e81fa 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -136,7 +136,14 @@ static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder,
136 lvds_pll_cntl &= ~RADEON_LVDS_PLL_EN; 136 lvds_pll_cntl &= ~RADEON_LVDS_PLL_EN;
137 137
138 lvds_ss_gen_cntl = RREG32(RADEON_LVDS_SS_GEN_CNTL); 138 lvds_ss_gen_cntl = RREG32(RADEON_LVDS_SS_GEN_CNTL);
139 if ((!rdev->is_atom_bios)) { 139 if (rdev->is_atom_bios) {
 140 /* LVDS_GEN_CNTL parameters are computed in LVDSEncoderControl;
 141 * we need to call that on resume to set up the reg properly.
142 */
143 radeon_encoder->pixel_clock = adjusted_mode->clock;
144 atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_ENABLE);
145 lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
146 } else {
140 struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv; 147 struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv;
141 if (lvds) { 148 if (lvds) {
142 DRM_DEBUG("bios LVDS_GEN_CNTL: 0x%x\n", lvds->lvds_gen_cntl); 149 DRM_DEBUG("bios LVDS_GEN_CNTL: 0x%x\n", lvds->lvds_gen_cntl);
@@ -147,8 +154,7 @@ static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder,
147 (lvds->panel_blon_delay << RADEON_LVDS_PWRSEQ_DELAY2_SHIFT)); 154 (lvds->panel_blon_delay << RADEON_LVDS_PWRSEQ_DELAY2_SHIFT));
148 } else 155 } else
149 lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL); 156 lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
150 } else 157 }
151 lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
152 lvds_gen_cntl |= RADEON_LVDS_DISPLAY_DIS; 158 lvds_gen_cntl |= RADEON_LVDS_DISPLAY_DIS;
153 lvds_gen_cntl &= ~(RADEON_LVDS_ON | 159 lvds_gen_cntl &= ~(RADEON_LVDS_ON |
154 RADEON_LVDS_BLON | 160 RADEON_LVDS_BLON |
@@ -184,9 +190,9 @@ static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder,
184 radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); 190 radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
185} 191}
186 192
187static bool radeon_legacy_lvds_mode_fixup(struct drm_encoder *encoder, 193static bool radeon_legacy_mode_fixup(struct drm_encoder *encoder,
188 struct drm_display_mode *mode, 194 struct drm_display_mode *mode,
189 struct drm_display_mode *adjusted_mode) 195 struct drm_display_mode *adjusted_mode)
190{ 196{
191 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 197 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
192 198
@@ -194,15 +200,22 @@ static bool radeon_legacy_lvds_mode_fixup(struct drm_encoder *encoder,
194 radeon_encoder_set_active_device(encoder); 200 radeon_encoder_set_active_device(encoder);
195 drm_mode_set_crtcinfo(adjusted_mode, 0); 201 drm_mode_set_crtcinfo(adjusted_mode, 0);
196 202
197 if (radeon_encoder->rmx_type != RMX_OFF) 203 /* get the native mode for LVDS */
198 radeon_rmx_mode_fixup(encoder, mode, adjusted_mode); 204 if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) {
205 struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
206 int mode_id = adjusted_mode->base.id;
207 *adjusted_mode = *native_mode;
208 adjusted_mode->hdisplay = mode->hdisplay;
209 adjusted_mode->vdisplay = mode->vdisplay;
210 adjusted_mode->base.id = mode_id;
211 }
199 212
200 return true; 213 return true;
201} 214}
202 215
203static const struct drm_encoder_helper_funcs radeon_legacy_lvds_helper_funcs = { 216static const struct drm_encoder_helper_funcs radeon_legacy_lvds_helper_funcs = {
204 .dpms = radeon_legacy_lvds_dpms, 217 .dpms = radeon_legacy_lvds_dpms,
205 .mode_fixup = radeon_legacy_lvds_mode_fixup, 218 .mode_fixup = radeon_legacy_mode_fixup,
206 .prepare = radeon_legacy_lvds_prepare, 219 .prepare = radeon_legacy_lvds_prepare,
207 .mode_set = radeon_legacy_lvds_mode_set, 220 .mode_set = radeon_legacy_lvds_mode_set,
208 .commit = radeon_legacy_lvds_commit, 221 .commit = radeon_legacy_lvds_commit,
@@ -214,17 +227,6 @@ static const struct drm_encoder_funcs radeon_legacy_lvds_enc_funcs = {
214 .destroy = radeon_enc_destroy, 227 .destroy = radeon_enc_destroy,
215}; 228};
216 229
217static bool radeon_legacy_primary_dac_mode_fixup(struct drm_encoder *encoder,
218 struct drm_display_mode *mode,
219 struct drm_display_mode *adjusted_mode)
220{
221 /* set the active encoder to connector routing */
222 radeon_encoder_set_active_device(encoder);
223 drm_mode_set_crtcinfo(adjusted_mode, 0);
224
225 return true;
226}
227
228static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode) 230static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode)
229{ 231{
230 struct drm_device *dev = encoder->dev; 232 struct drm_device *dev = encoder->dev;
@@ -410,7 +412,7 @@ static enum drm_connector_status radeon_legacy_primary_dac_detect(struct drm_enc
410 412
411static const struct drm_encoder_helper_funcs radeon_legacy_primary_dac_helper_funcs = { 413static const struct drm_encoder_helper_funcs radeon_legacy_primary_dac_helper_funcs = {
412 .dpms = radeon_legacy_primary_dac_dpms, 414 .dpms = radeon_legacy_primary_dac_dpms,
413 .mode_fixup = radeon_legacy_primary_dac_mode_fixup, 415 .mode_fixup = radeon_legacy_mode_fixup,
414 .prepare = radeon_legacy_primary_dac_prepare, 416 .prepare = radeon_legacy_primary_dac_prepare,
415 .mode_set = radeon_legacy_primary_dac_mode_set, 417 .mode_set = radeon_legacy_primary_dac_mode_set,
416 .commit = radeon_legacy_primary_dac_commit, 418 .commit = radeon_legacy_primary_dac_commit,
@@ -423,16 +425,6 @@ static const struct drm_encoder_funcs radeon_legacy_primary_dac_enc_funcs = {
423 .destroy = radeon_enc_destroy, 425 .destroy = radeon_enc_destroy,
424}; 426};
425 427
426static bool radeon_legacy_tmds_int_mode_fixup(struct drm_encoder *encoder,
427 struct drm_display_mode *mode,
428 struct drm_display_mode *adjusted_mode)
429{
430
431 drm_mode_set_crtcinfo(adjusted_mode, 0);
432
433 return true;
434}
435
436static void radeon_legacy_tmds_int_dpms(struct drm_encoder *encoder, int mode) 428static void radeon_legacy_tmds_int_dpms(struct drm_encoder *encoder, int mode)
437{ 429{
438 struct drm_device *dev = encoder->dev; 430 struct drm_device *dev = encoder->dev;
@@ -584,7 +576,7 @@ static void radeon_legacy_tmds_int_mode_set(struct drm_encoder *encoder,
584 576
585static const struct drm_encoder_helper_funcs radeon_legacy_tmds_int_helper_funcs = { 577static const struct drm_encoder_helper_funcs radeon_legacy_tmds_int_helper_funcs = {
586 .dpms = radeon_legacy_tmds_int_dpms, 578 .dpms = radeon_legacy_tmds_int_dpms,
587 .mode_fixup = radeon_legacy_tmds_int_mode_fixup, 579 .mode_fixup = radeon_legacy_mode_fixup,
588 .prepare = radeon_legacy_tmds_int_prepare, 580 .prepare = radeon_legacy_tmds_int_prepare,
589 .mode_set = radeon_legacy_tmds_int_mode_set, 581 .mode_set = radeon_legacy_tmds_int_mode_set,
590 .commit = radeon_legacy_tmds_int_commit, 582 .commit = radeon_legacy_tmds_int_commit,
@@ -596,17 +588,6 @@ static const struct drm_encoder_funcs radeon_legacy_tmds_int_enc_funcs = {
596 .destroy = radeon_enc_destroy, 588 .destroy = radeon_enc_destroy,
597}; 589};
598 590
599static bool radeon_legacy_tmds_ext_mode_fixup(struct drm_encoder *encoder,
600 struct drm_display_mode *mode,
601 struct drm_display_mode *adjusted_mode)
602{
603 /* set the active encoder to connector routing */
604 radeon_encoder_set_active_device(encoder);
605 drm_mode_set_crtcinfo(adjusted_mode, 0);
606
607 return true;
608}
609
610static void radeon_legacy_tmds_ext_dpms(struct drm_encoder *encoder, int mode) 591static void radeon_legacy_tmds_ext_dpms(struct drm_encoder *encoder, int mode)
611{ 592{
612 struct drm_device *dev = encoder->dev; 593 struct drm_device *dev = encoder->dev;
@@ -697,6 +678,8 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder,
697 /*if (mode->clock > 165000) 678 /*if (mode->clock > 165000)
698 fp2_gen_cntl |= R300_FP2_DVO_DUAL_CHANNEL_EN;*/ 679 fp2_gen_cntl |= R300_FP2_DVO_DUAL_CHANNEL_EN;*/
699 } 680 }
681 if (!radeon_combios_external_tmds_setup(encoder))
682 radeon_external_tmds_setup(encoder);
700 } 683 }
701 684
702 if (radeon_crtc->crtc_id == 0) { 685 if (radeon_crtc->crtc_id == 0) {
@@ -724,9 +707,22 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder,
724 radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); 707 radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
725} 708}
726 709
710static void radeon_ext_tmds_enc_destroy(struct drm_encoder *encoder)
711{
712 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
713 struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv;
714 if (tmds) {
715 if (tmds->i2c_bus)
716 radeon_i2c_destroy(tmds->i2c_bus);
717 }
718 kfree(radeon_encoder->enc_priv);
719 drm_encoder_cleanup(encoder);
720 kfree(radeon_encoder);
721}
722
727static const struct drm_encoder_helper_funcs radeon_legacy_tmds_ext_helper_funcs = { 723static const struct drm_encoder_helper_funcs radeon_legacy_tmds_ext_helper_funcs = {
728 .dpms = radeon_legacy_tmds_ext_dpms, 724 .dpms = radeon_legacy_tmds_ext_dpms,
729 .mode_fixup = radeon_legacy_tmds_ext_mode_fixup, 725 .mode_fixup = radeon_legacy_mode_fixup,
730 .prepare = radeon_legacy_tmds_ext_prepare, 726 .prepare = radeon_legacy_tmds_ext_prepare,
731 .mode_set = radeon_legacy_tmds_ext_mode_set, 727 .mode_set = radeon_legacy_tmds_ext_mode_set,
732 .commit = radeon_legacy_tmds_ext_commit, 728 .commit = radeon_legacy_tmds_ext_commit,
@@ -735,20 +731,9 @@ static const struct drm_encoder_helper_funcs radeon_legacy_tmds_ext_helper_funcs
735 731
736 732
737static const struct drm_encoder_funcs radeon_legacy_tmds_ext_enc_funcs = { 733static const struct drm_encoder_funcs radeon_legacy_tmds_ext_enc_funcs = {
738 .destroy = radeon_enc_destroy, 734 .destroy = radeon_ext_tmds_enc_destroy,
739}; 735};
740 736
741static bool radeon_legacy_tv_dac_mode_fixup(struct drm_encoder *encoder,
742 struct drm_display_mode *mode,
743 struct drm_display_mode *adjusted_mode)
744{
745 /* set the active encoder to connector routing */
746 radeon_encoder_set_active_device(encoder);
747 drm_mode_set_crtcinfo(adjusted_mode, 0);
748
749 return true;
750}
751
752static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode) 737static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode)
753{ 738{
754 struct drm_device *dev = encoder->dev; 739 struct drm_device *dev = encoder->dev;
@@ -1265,7 +1250,7 @@ static enum drm_connector_status radeon_legacy_tv_dac_detect(struct drm_encoder
1265 1250
1266static const struct drm_encoder_helper_funcs radeon_legacy_tv_dac_helper_funcs = { 1251static const struct drm_encoder_helper_funcs radeon_legacy_tv_dac_helper_funcs = {
1267 .dpms = radeon_legacy_tv_dac_dpms, 1252 .dpms = radeon_legacy_tv_dac_dpms,
1268 .mode_fixup = radeon_legacy_tv_dac_mode_fixup, 1253 .mode_fixup = radeon_legacy_mode_fixup,
1269 .prepare = radeon_legacy_tv_dac_prepare, 1254 .prepare = radeon_legacy_tv_dac_prepare,
1270 .mode_set = radeon_legacy_tv_dac_mode_set, 1255 .mode_set = radeon_legacy_tv_dac_mode_set,
1271 .commit = radeon_legacy_tv_dac_commit, 1256 .commit = radeon_legacy_tv_dac_commit,
@@ -1302,6 +1287,29 @@ static struct radeon_encoder_int_tmds *radeon_legacy_get_tmds_info(struct radeon
1302 return tmds; 1287 return tmds;
1303} 1288}
1304 1289
1290static struct radeon_encoder_ext_tmds *radeon_legacy_get_ext_tmds_info(struct radeon_encoder *encoder)
1291{
1292 struct drm_device *dev = encoder->base.dev;
1293 struct radeon_device *rdev = dev->dev_private;
1294 struct radeon_encoder_ext_tmds *tmds = NULL;
1295 bool ret;
1296
1297 if (rdev->is_atom_bios)
1298 return NULL;
1299
1300 tmds = kzalloc(sizeof(struct radeon_encoder_ext_tmds), GFP_KERNEL);
1301
1302 if (!tmds)
1303 return NULL;
1304
1305 ret = radeon_legacy_get_ext_tmds_info_from_combios(encoder, tmds);
1306
1307 if (ret == false)
1308 radeon_legacy_get_ext_tmds_info_from_table(encoder, tmds);
1309
1310 return tmds;
1311}
1312
1305void 1313void
1306radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device) 1314radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device)
1307{ 1315{
@@ -1329,7 +1337,6 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t
1329 encoder->possible_crtcs = 0x1; 1337 encoder->possible_crtcs = 0x1;
1330 else 1338 else
1331 encoder->possible_crtcs = 0x3; 1339 encoder->possible_crtcs = 0x3;
1332 encoder->possible_clones = 0;
1333 1340
1334 radeon_encoder->enc_priv = NULL; 1341 radeon_encoder->enc_priv = NULL;
1335 1342
@@ -1373,7 +1380,7 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t
1373 drm_encoder_init(dev, encoder, &radeon_legacy_tmds_ext_enc_funcs, DRM_MODE_ENCODER_TMDS); 1380 drm_encoder_init(dev, encoder, &radeon_legacy_tmds_ext_enc_funcs, DRM_MODE_ENCODER_TMDS);
1374 drm_encoder_helper_add(encoder, &radeon_legacy_tmds_ext_helper_funcs); 1381 drm_encoder_helper_add(encoder, &radeon_legacy_tmds_ext_helper_funcs);
1375 if (!rdev->is_atom_bios) 1382 if (!rdev->is_atom_bios)
1376 radeon_combios_get_ext_tmds_info(radeon_encoder); 1383 radeon_encoder->enc_priv = radeon_legacy_get_ext_tmds_info(radeon_encoder);
1377 break; 1384 break;
1378 } 1385 }
1379} 1386}
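
The four per-encoder mode_fixup callbacks removed in the hunks above are folded into a single radeon_legacy_mode_fixup. Its body falls outside this hunk; judging from the removed copies it presumably reduces to the common routing-plus-crtcinfo sequence (a sketch, not the verbatim source):

static bool radeon_legacy_mode_fixup(struct drm_encoder *encoder,
				     struct drm_display_mode *mode,
				     struct drm_display_mode *adjusted_mode)
{
	/* set the active encoder to connector routing */
	radeon_encoder_set_active_device(encoder);
	drm_mode_set_crtcinfo(adjusted_mode, 0);

	return true;
}
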
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index ace726aa0d76..44d4b652ea12 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -33,6 +33,7 @@
33#include <drm_crtc.h> 33#include <drm_crtc.h>
34#include <drm_mode.h> 34#include <drm_mode.h>
35#include <drm_edid.h> 35#include <drm_edid.h>
36#include <drm_dp_helper.h>
36#include <linux/i2c.h> 37#include <linux/i2c.h>
37#include <linux/i2c-id.h> 38#include <linux/i2c-id.h>
38#include <linux/i2c-algo-bit.h> 39#include <linux/i2c-algo-bit.h>
@@ -89,24 +90,45 @@ enum radeon_tv_std {
89 TV_STD_PAL_CN, 90 TV_STD_PAL_CN,
90}; 91};
91 92
93/* radeon gpio-based i2c
94 * 1. "mask" reg and bits
95 * grabs the gpio pins for software use
96 * 0=not held 1=held
97 * 2. "a" reg and bits
98 * output pin value
99 * 0=low 1=high
100 * 3. "en" reg and bits
101 * sets the pin direction
102 * 0=input 1=output
103 * 4. "y" reg and bits
104 * input pin value
105 * 0=low 1=high
106 */
92struct radeon_i2c_bus_rec { 107struct radeon_i2c_bus_rec {
93 bool valid; 108 bool valid;
109 /* id used by atom */
110 uint8_t i2c_id;
111 /* can be used with hw i2c engine */
112 bool hw_capable;
113 /* uses multi-media i2c engine */
114 bool mm_i2c;
115 /* regs and bits */
94 uint32_t mask_clk_reg; 116 uint32_t mask_clk_reg;
95 uint32_t mask_data_reg; 117 uint32_t mask_data_reg;
96 uint32_t a_clk_reg; 118 uint32_t a_clk_reg;
97 uint32_t a_data_reg; 119 uint32_t a_data_reg;
98 uint32_t put_clk_reg; 120 uint32_t en_clk_reg;
99 uint32_t put_data_reg; 121 uint32_t en_data_reg;
100 uint32_t get_clk_reg; 122 uint32_t y_clk_reg;
101 uint32_t get_data_reg; 123 uint32_t y_data_reg;
102 uint32_t mask_clk_mask; 124 uint32_t mask_clk_mask;
103 uint32_t mask_data_mask; 125 uint32_t mask_data_mask;
104 uint32_t put_clk_mask;
105 uint32_t put_data_mask;
106 uint32_t get_clk_mask;
107 uint32_t get_data_mask;
108 uint32_t a_clk_mask; 126 uint32_t a_clk_mask;
109 uint32_t a_data_mask; 127 uint32_t a_data_mask;
128 uint32_t en_clk_mask;
129 uint32_t en_data_mask;
130 uint32_t y_clk_mask;
131 uint32_t y_data_mask;
110}; 132};
111 133
112struct radeon_tmds_pll { 134struct radeon_tmds_pll {
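
As a worked example of the comment above: a bit-banged transfer releases a pin by clearing its "en" bit (direction = input) and then samples it through the "y" register. A minimal sketch, assuming the usual RREG32/WREG32 accessors with rdev in scope; the helper name is hypothetical (the real callbacks live in radeon_i2c.c):

static int radeon_sw_i2c_get_data(struct radeon_i2c_chan *i2c)
{
	struct radeon_device *rdev = i2c->dev->dev_private;
	u32 val;

	/* let the data pin float as an input... */
	val = RREG32(i2c->rec.en_data_reg) & ~i2c->rec.en_data_mask;
	WREG32(i2c->rec.en_data_reg, val);
	/* ...then read its level back through the "y" register */
	val = RREG32(i2c->rec.y_data_reg);
	return (val & i2c->rec.y_data_mask) != 0;
}
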
@@ -150,9 +172,12 @@ struct radeon_pll {
150}; 172};
151 173
152struct radeon_i2c_chan { 174struct radeon_i2c_chan {
153 struct drm_device *dev;
154 struct i2c_adapter adapter; 175 struct i2c_adapter adapter;
155 struct i2c_algo_bit_data algo; 176 struct drm_device *dev;
177 union {
178 struct i2c_algo_dp_aux_data dp;
179 struct i2c_algo_bit_data bit;
180 } algo;
156 struct radeon_i2c_bus_rec rec; 181 struct radeon_i2c_bus_rec rec;
157}; 182};
158 183
@@ -170,6 +195,11 @@ enum radeon_connector_table {
170 CT_EMAC, 195 CT_EMAC,
171}; 196};
172 197
198enum radeon_dvo_chip {
199 DVO_SIL164,
200 DVO_SIL1178,
201};
202
173struct radeon_mode_info { 203struct radeon_mode_info {
174 struct atom_context *atom_context; 204 struct atom_context *atom_context;
175 struct card_info *atom_card_info; 205 struct card_info *atom_card_info;
@@ -261,6 +291,13 @@ struct radeon_encoder_int_tmds {
261 struct radeon_tmds_pll tmds_pll[4]; 291 struct radeon_tmds_pll tmds_pll[4];
262}; 292};
263 293
294struct radeon_encoder_ext_tmds {
295 /* tmds over dvo */
296 struct radeon_i2c_chan *i2c_bus;
297 uint8_t slave_addr;
298 enum radeon_dvo_chip dvo_chip;
299};
300
264/* spread spectrum */ 301/* spread spectrum */
265struct radeon_atom_ss { 302struct radeon_atom_ss {
266 uint16_t percentage; 303 uint16_t percentage;
@@ -302,6 +339,35 @@ struct radeon_encoder {
302struct radeon_connector_atom_dig { 339struct radeon_connector_atom_dig {
303 uint32_t igp_lane_info; 340 uint32_t igp_lane_info;
304 bool linkb; 341 bool linkb;
342 /* displayport */
343 struct radeon_i2c_chan *dp_i2c_bus;
344 u8 dpcd[8];
345 u8 dp_sink_type;
346 int dp_clock;
347 int dp_lane_count;
348};
349
350struct radeon_gpio_rec {
351 bool valid;
352 u8 id;
353 u32 reg;
354 u32 mask;
355};
356
357enum radeon_hpd_id {
358 RADEON_HPD_NONE = 0,
359 RADEON_HPD_1,
360 RADEON_HPD_2,
361 RADEON_HPD_3,
362 RADEON_HPD_4,
363 RADEON_HPD_5,
364 RADEON_HPD_6,
365};
366
367struct radeon_hpd {
368 enum radeon_hpd_id hpd;
369 u8 plugged_state;
370 struct radeon_gpio_rec gpio;
305}; 371};
306 372
307struct radeon_connector { 373struct radeon_connector {
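
The two new records compose naturally: radeon_gpio_rec supplies the register and mask for a pad, while radeon_hpd identifies which hotplug line it is and caches the last observed state. A hotplug sense could look roughly like this (illustrative only; the field use is inferred from the definitions above):

static bool radeon_hpd_sense(struct radeon_device *rdev,
			     struct radeon_connector *conn)
{
	if (!conn->hpd.gpio.valid)
		return false;
	/* a set bit on the pad means the sink is plugged */
	return !!(RREG32(conn->hpd.gpio.reg) & conn->hpd.gpio.mask);
}
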
@@ -318,6 +384,7 @@ struct radeon_connector {
318 void *con_priv; 384 void *con_priv;
319 bool dac_load_detect; 385 bool dac_load_detect;
320 uint16_t connector_object_id; 386 uint16_t connector_object_id;
387 struct radeon_hpd hpd;
321}; 388};
322 389
323struct radeon_framebuffer { 390struct radeon_framebuffer {
@@ -325,10 +392,37 @@ struct radeon_framebuffer {
325 struct drm_gem_object *obj; 392 struct drm_gem_object *obj;
326}; 393};
327 394
395extern void radeon_connector_hotplug(struct drm_connector *connector);
396extern bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector);
397extern int radeon_dp_mode_valid_helper(struct radeon_connector *radeon_connector,
398 struct drm_display_mode *mode);
399extern void radeon_dp_set_link_config(struct drm_connector *connector,
400 struct drm_display_mode *mode);
401extern void dp_link_train(struct drm_encoder *encoder,
402 struct drm_connector *connector);
403extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector);
404extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector);
405extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder,
406 int action, uint8_t lane_num,
407 uint8_t lane_set);
408extern int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
409 uint8_t write_byte, uint8_t *read_byte);
410
411extern struct radeon_i2c_chan *radeon_i2c_create_dp(struct drm_device *dev,
412 struct radeon_i2c_bus_rec *rec,
413 const char *name);
328extern struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, 414extern struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
329 struct radeon_i2c_bus_rec *rec, 415 struct radeon_i2c_bus_rec *rec,
330 const char *name); 416 const char *name);
331extern void radeon_i2c_destroy(struct radeon_i2c_chan *i2c); 417extern void radeon_i2c_destroy(struct radeon_i2c_chan *i2c);
418extern void radeon_i2c_sw_get_byte(struct radeon_i2c_chan *i2c_bus,
419 u8 slave_addr,
420 u8 addr,
421 u8 *val);
422extern void radeon_i2c_sw_put_byte(struct radeon_i2c_chan *i2c,
423 u8 slave_addr,
424 u8 addr,
425 u8 val);
332extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector); 426extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector);
333extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector); 427extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
334 428
@@ -343,12 +437,24 @@ extern void radeon_compute_pll(struct radeon_pll *pll,
343 uint32_t *post_div_p, 437 uint32_t *post_div_p,
344 int flags); 438 int flags);
345 439
440extern void radeon_compute_pll_avivo(struct radeon_pll *pll,
441 uint64_t freq,
442 uint32_t *dot_clock_p,
443 uint32_t *fb_div_p,
444 uint32_t *frac_fb_div_p,
445 uint32_t *ref_div_p,
446 uint32_t *post_div_p,
447 int flags);
448
449extern void radeon_setup_encoder_clones(struct drm_device *dev);
450
346struct drm_encoder *radeon_encoder_legacy_lvds_add(struct drm_device *dev, int bios_index); 451struct drm_encoder *radeon_encoder_legacy_lvds_add(struct drm_device *dev, int bios_index);
347struct drm_encoder *radeon_encoder_legacy_primary_dac_add(struct drm_device *dev, int bios_index, int with_tv); 452struct drm_encoder *radeon_encoder_legacy_primary_dac_add(struct drm_device *dev, int bios_index, int with_tv);
348struct drm_encoder *radeon_encoder_legacy_tv_dac_add(struct drm_device *dev, int bios_index, int with_tv); 453struct drm_encoder *radeon_encoder_legacy_tv_dac_add(struct drm_device *dev, int bios_index, int with_tv);
349struct drm_encoder *radeon_encoder_legacy_tmds_int_add(struct drm_device *dev, int bios_index); 454struct drm_encoder *radeon_encoder_legacy_tmds_int_add(struct drm_device *dev, int bios_index);
350struct drm_encoder *radeon_encoder_legacy_tmds_ext_add(struct drm_device *dev, int bios_index); 455struct drm_encoder *radeon_encoder_legacy_tmds_ext_add(struct drm_device *dev, int bios_index);
351extern void atombios_external_tmds_setup(struct drm_encoder *encoder, int action); 456extern void atombios_external_tmds_setup(struct drm_encoder *encoder, int action);
457extern void atombios_digital_setup(struct drm_encoder *encoder, int action);
352extern int atombios_get_encoder_mode(struct drm_encoder *encoder); 458extern int atombios_get_encoder_mode(struct drm_encoder *encoder);
353extern void radeon_encoder_set_active_device(struct drm_encoder *encoder); 459extern void radeon_encoder_set_active_device(struct drm_encoder *encoder);
354 460
@@ -378,12 +484,16 @@ extern bool radeon_atom_get_clock_info(struct drm_device *dev);
378extern bool radeon_combios_get_clock_info(struct drm_device *dev); 484extern bool radeon_combios_get_clock_info(struct drm_device *dev);
379extern struct radeon_encoder_atom_dig * 485extern struct radeon_encoder_atom_dig *
380radeon_atombios_get_lvds_info(struct radeon_encoder *encoder); 486radeon_atombios_get_lvds_info(struct radeon_encoder *encoder);
381bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder, 487extern bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder,
382 struct radeon_encoder_int_tmds *tmds); 488 struct radeon_encoder_int_tmds *tmds);
383bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder, 489extern bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder,
384 struct radeon_encoder_int_tmds *tmds); 490 struct radeon_encoder_int_tmds *tmds);
385bool radeon_legacy_get_tmds_info_from_table(struct radeon_encoder *encoder, 491extern bool radeon_legacy_get_tmds_info_from_table(struct radeon_encoder *encoder,
386 struct radeon_encoder_int_tmds *tmds); 492 struct radeon_encoder_int_tmds *tmds);
493extern bool radeon_legacy_get_ext_tmds_info_from_combios(struct radeon_encoder *encoder,
494 struct radeon_encoder_ext_tmds *tmds);
495extern bool radeon_legacy_get_ext_tmds_info_from_table(struct radeon_encoder *encoder,
496 struct radeon_encoder_ext_tmds *tmds);
387extern struct radeon_encoder_primary_dac * 497extern struct radeon_encoder_primary_dac *
388radeon_atombios_get_primary_dac_info(struct radeon_encoder *encoder); 498radeon_atombios_get_primary_dac_info(struct radeon_encoder *encoder);
389extern struct radeon_encoder_tv_dac * 499extern struct radeon_encoder_tv_dac *
@@ -395,6 +505,8 @@ extern struct radeon_encoder_tv_dac *
395radeon_combios_get_tv_dac_info(struct radeon_encoder *encoder); 505radeon_combios_get_tv_dac_info(struct radeon_encoder *encoder);
396extern struct radeon_encoder_primary_dac * 506extern struct radeon_encoder_primary_dac *
397radeon_combios_get_primary_dac_info(struct radeon_encoder *encoder); 507radeon_combios_get_primary_dac_info(struct radeon_encoder *encoder);
508extern bool radeon_combios_external_tmds_setup(struct drm_encoder *encoder);
509extern void radeon_external_tmds_setup(struct drm_encoder *encoder);
398extern void radeon_combios_output_lock(struct drm_encoder *encoder, bool lock); 510extern void radeon_combios_output_lock(struct drm_encoder *encoder, bool lock);
399extern void radeon_combios_initialize_bios_scratch_regs(struct drm_device *dev); 511extern void radeon_combios_initialize_bios_scratch_regs(struct drm_device *dev);
400extern void radeon_atom_output_lock(struct drm_encoder *encoder, bool lock); 512extern void radeon_atom_output_lock(struct drm_encoder *encoder, bool lock);
@@ -426,16 +538,13 @@ void radeon_atombios_init_crtc(struct drm_device *dev,
426 struct radeon_crtc *radeon_crtc); 538 struct radeon_crtc *radeon_crtc);
427void radeon_legacy_init_crtc(struct drm_device *dev, 539void radeon_legacy_init_crtc(struct drm_device *dev,
428 struct radeon_crtc *radeon_crtc); 540 struct radeon_crtc *radeon_crtc);
429void radeon_i2c_do_lock(struct radeon_connector *radeon_connector, int lock_state); 541extern void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state);
430 542
431void radeon_get_clock_info(struct drm_device *dev); 543void radeon_get_clock_info(struct drm_device *dev);
432 544
433extern bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev); 545extern bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev);
434extern bool radeon_get_atom_connector_info_from_supported_devices_table(struct drm_device *dev); 546extern bool radeon_get_atom_connector_info_from_supported_devices_table(struct drm_device *dev);
435 547
436void radeon_rmx_mode_fixup(struct drm_encoder *encoder,
437 struct drm_display_mode *mode,
438 struct drm_display_mode *adjusted_mode);
439void radeon_enc_destroy(struct drm_encoder *encoder); 548void radeon_enc_destroy(struct drm_encoder *encoder);
440void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj); 549void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj);
441void radeon_combios_asic_init(struct drm_device *dev); 550void radeon_combios_asic_init(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 1f056dadc5c2..2040937682fd 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -34,74 +34,32 @@
34#include "radeon_drm.h" 34#include "radeon_drm.h"
35#include "radeon.h" 35#include "radeon.h"
36 36
37struct radeon_object {
38 struct ttm_buffer_object tobj;
39 struct list_head list;
40 struct radeon_device *rdev;
41 struct drm_gem_object *gobj;
42 struct ttm_bo_kmap_obj kmap;
43 unsigned pin_count;
44 uint64_t gpu_addr;
45 void *kptr;
46 bool is_iomem;
47 uint32_t tiling_flags;
48 uint32_t pitch;
49 int surface_reg;
50};
51 37
52int radeon_ttm_init(struct radeon_device *rdev); 38int radeon_ttm_init(struct radeon_device *rdev);
53void radeon_ttm_fini(struct radeon_device *rdev); 39void radeon_ttm_fini(struct radeon_device *rdev);
40static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);
54 41
55/* 42/*
56 * To exclude mutual BO access we rely on bo_reserve exclusion, as all 43 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 57 * functions are calling it. 44 * functions are calling it.
58 */ 45 */
59 46
60static int radeon_object_reserve(struct radeon_object *robj, bool interruptible) 47static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
61{ 48{
62 return ttm_bo_reserve(&robj->tobj, interruptible, false, false, 0); 49 struct radeon_bo *bo;
63}
64
65static void radeon_object_unreserve(struct radeon_object *robj)
66{
67 ttm_bo_unreserve(&robj->tobj);
68}
69
70static void radeon_ttm_object_object_destroy(struct ttm_buffer_object *tobj)
71{
72 struct radeon_object *robj;
73 50
74 robj = container_of(tobj, struct radeon_object, tobj); 51 bo = container_of(tbo, struct radeon_bo, tbo);
75 list_del_init(&robj->list); 52 mutex_lock(&bo->rdev->gem.mutex);
76 radeon_object_clear_surface_reg(robj); 53 list_del_init(&bo->list);
77 kfree(robj); 54 mutex_unlock(&bo->rdev->gem.mutex);
55 radeon_bo_clear_surface_reg(bo);
56 kfree(bo);
78} 57}
79 58
80static inline void radeon_object_gpu_addr(struct radeon_object *robj) 59static inline u32 radeon_ttm_flags_from_domain(u32 domain)
81{ 60{
82 /* Default gpu address */ 61 u32 flags = 0;
83 robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
84 if (robj->tobj.mem.mm_node == NULL) {
85 return;
86 }
87 robj->gpu_addr = ((u64)robj->tobj.mem.mm_node->start) << PAGE_SHIFT;
88 switch (robj->tobj.mem.mem_type) {
89 case TTM_PL_VRAM:
90 robj->gpu_addr += (u64)robj->rdev->mc.vram_location;
91 break;
92 case TTM_PL_TT:
93 robj->gpu_addr += (u64)robj->rdev->mc.gtt_location;
94 break;
95 default:
96 DRM_ERROR("Unknown placement %d\n", robj->tobj.mem.mem_type);
97 robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
98 return;
99 }
100}
101 62
102static inline uint32_t radeon_object_flags_from_domain(uint32_t domain)
103{
104 uint32_t flags = 0;
105 if (domain & RADEON_GEM_DOMAIN_VRAM) { 63 if (domain & RADEON_GEM_DOMAIN_VRAM) {
106 flags |= TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED; 64 flags |= TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED;
107 } 65 }
@@ -117,17 +75,32 @@ static inline uint32_t radeon_object_flags_from_domain(uint32_t domain)
117 return flags; 75 return flags;
118} 76}
119 77
120int radeon_object_create(struct radeon_device *rdev, 78void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
121 struct drm_gem_object *gobj, 79{
122 unsigned long size, 80 u32 c = 0;
123 bool kernel, 81
124 uint32_t domain, 82 rbo->placement.fpfn = 0;
125 bool interruptible, 83 rbo->placement.lpfn = 0;
126 struct radeon_object **robj_ptr) 84 rbo->placement.placement = rbo->placements;
85 rbo->placement.busy_placement = rbo->placements;
86 if (domain & RADEON_GEM_DOMAIN_VRAM)
87 rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
88 TTM_PL_FLAG_VRAM;
89 if (domain & RADEON_GEM_DOMAIN_GTT)
90 rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
91 if (domain & RADEON_GEM_DOMAIN_CPU)
92 rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
93 rbo->placement.num_placement = c;
94 rbo->placement.num_busy_placement = c;
95}
96
97int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
98 unsigned long size, bool kernel, u32 domain,
99 struct radeon_bo **bo_ptr)
127{ 100{
128 struct radeon_object *robj; 101 struct radeon_bo *bo;
129 enum ttm_bo_type type; 102 enum ttm_bo_type type;
130 uint32_t flags; 103 u32 flags;
131 int r; 104 int r;
132 105
133 if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) { 106 if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
@@ -138,206 +111,125 @@ int radeon_object_create(struct radeon_device *rdev,
138 } else { 111 } else {
139 type = ttm_bo_type_device; 112 type = ttm_bo_type_device;
140 } 113 }
141 *robj_ptr = NULL; 114 *bo_ptr = NULL;
142 robj = kzalloc(sizeof(struct radeon_object), GFP_KERNEL); 115 bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
143 if (robj == NULL) { 116 if (bo == NULL)
144 return -ENOMEM; 117 return -ENOMEM;
145 } 118 bo->rdev = rdev;
146 robj->rdev = rdev; 119 bo->gobj = gobj;
147 robj->gobj = gobj; 120 bo->surface_reg = -1;
148 robj->surface_reg = -1; 121 INIT_LIST_HEAD(&bo->list);
149 INIT_LIST_HEAD(&robj->list); 122
150 123 flags = radeon_ttm_flags_from_domain(domain);
 151 flags = radeon_object_flags_from_domain(domain); 124 /* Kernel allocations are uninterruptible */
152 r = ttm_buffer_object_init(&rdev->mman.bdev, &robj->tobj, size, type, flags, 125 r = ttm_buffer_object_init(&rdev->mman.bdev, &bo->tbo, size, type,
153 0, 0, false, NULL, size, 126 flags, 0, 0, !kernel, NULL, size,
154 &radeon_ttm_object_object_destroy); 127 &radeon_ttm_bo_destroy);
155 if (unlikely(r != 0)) { 128 if (unlikely(r != 0)) {
156 /* ttm call radeon_ttm_object_object_destroy if error happen */ 129 if (r != -ERESTARTSYS)
157 DRM_ERROR("Failed to allocate TTM object (%ld, 0x%08X, %u)\n", 130 dev_err(rdev->dev,
158 size, flags, 0); 131 "object_init failed for (%ld, 0x%08X)\n",
132 size, flags);
159 return r; 133 return r;
160 } 134 }
161 *robj_ptr = robj; 135 *bo_ptr = bo;
162 if (gobj) { 136 if (gobj) {
163 list_add_tail(&robj->list, &rdev->gem.objects); 137 mutex_lock(&bo->rdev->gem.mutex);
138 list_add_tail(&bo->list, &rdev->gem.objects);
139 mutex_unlock(&bo->rdev->gem.mutex);
164 } 140 }
165 return 0; 141 return 0;
166} 142}
167 143
168int radeon_object_kmap(struct radeon_object *robj, void **ptr) 144int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
169{ 145{
146 bool is_iomem;
170 int r; 147 int r;
171 148
172 spin_lock(&robj->tobj.lock); 149 if (bo->kptr) {
173 if (robj->kptr) {
174 if (ptr) { 150 if (ptr) {
175 *ptr = robj->kptr; 151 *ptr = bo->kptr;
176 } 152 }
177 spin_unlock(&robj->tobj.lock);
178 return 0; 153 return 0;
179 } 154 }
180 spin_unlock(&robj->tobj.lock); 155 r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
181 r = ttm_bo_kmap(&robj->tobj, 0, robj->tobj.num_pages, &robj->kmap);
182 if (r) { 156 if (r) {
183 return r; 157 return r;
184 } 158 }
185 spin_lock(&robj->tobj.lock); 159 bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
186 robj->kptr = ttm_kmap_obj_virtual(&robj->kmap, &robj->is_iomem);
187 spin_unlock(&robj->tobj.lock);
188 if (ptr) { 160 if (ptr) {
189 *ptr = robj->kptr; 161 *ptr = bo->kptr;
190 } 162 }
191 radeon_object_check_tiling(robj, 0, 0); 163 radeon_bo_check_tiling(bo, 0, 0);
192 return 0; 164 return 0;
193} 165}
194 166
195void radeon_object_kunmap(struct radeon_object *robj) 167void radeon_bo_kunmap(struct radeon_bo *bo)
196{ 168{
197 spin_lock(&robj->tobj.lock); 169 if (bo->kptr == NULL)
198 if (robj->kptr == NULL) {
199 spin_unlock(&robj->tobj.lock);
200 return; 170 return;
201 } 171 bo->kptr = NULL;
202 robj->kptr = NULL; 172 radeon_bo_check_tiling(bo, 0, 0);
203 spin_unlock(&robj->tobj.lock); 173 ttm_bo_kunmap(&bo->kmap);
204 radeon_object_check_tiling(robj, 0, 0);
205 ttm_bo_kunmap(&robj->kmap);
206} 174}
207 175
208void radeon_object_unref(struct radeon_object **robj) 176void radeon_bo_unref(struct radeon_bo **bo)
209{ 177{
210 struct ttm_buffer_object *tobj; 178 struct ttm_buffer_object *tbo;
211 179
212 if ((*robj) == NULL) { 180 if ((*bo) == NULL)
213 return; 181 return;
214 } 182 tbo = &((*bo)->tbo);
215 tobj = &((*robj)->tobj); 183 ttm_bo_unref(&tbo);
216 ttm_bo_unref(&tobj); 184 if (tbo == NULL)
217 if (tobj == NULL) { 185 *bo = NULL;
218 *robj = NULL;
219 }
220}
221
222int radeon_object_mmap(struct radeon_object *robj, uint64_t *offset)
223{
224 *offset = robj->tobj.addr_space_offset;
225 return 0;
226} 186}
227 187
228int radeon_object_pin(struct radeon_object *robj, uint32_t domain, 188int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
229 uint64_t *gpu_addr)
230{ 189{
231 uint32_t flags; 190 int r, i;
232 uint32_t tmp;
233 int r;
234 191
235 flags = radeon_object_flags_from_domain(domain); 192 radeon_ttm_placement_from_domain(bo, domain);
236 spin_lock(&robj->tobj.lock); 193 if (bo->pin_count) {
237 if (robj->pin_count) { 194 bo->pin_count++;
238 robj->pin_count++; 195 if (gpu_addr)
239 if (gpu_addr != NULL) { 196 *gpu_addr = radeon_bo_gpu_offset(bo);
240 *gpu_addr = robj->gpu_addr;
241 }
242 spin_unlock(&robj->tobj.lock);
243 return 0; 197 return 0;
244 } 198 }
245 spin_unlock(&robj->tobj.lock); 199 radeon_ttm_placement_from_domain(bo, domain);
246 r = radeon_object_reserve(robj, false); 200 for (i = 0; i < bo->placement.num_placement; i++)
247 if (unlikely(r != 0)) { 201 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
248 DRM_ERROR("radeon: failed to reserve object for pinning it.\n"); 202 r = ttm_buffer_object_validate(&bo->tbo, &bo->placement, false, false);
249 return r; 203 if (likely(r == 0)) {
250 } 204 bo->pin_count = 1;
251 tmp = robj->tobj.mem.placement; 205 if (gpu_addr != NULL)
252 ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM); 206 *gpu_addr = radeon_bo_gpu_offset(bo);
253 robj->tobj.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT | TTM_PL_MASK_CACHING; 207 }
254 r = ttm_buffer_object_validate(&robj->tobj, 208 if (unlikely(r != 0))
255 robj->tobj.proposed_placement, 209 dev_err(bo->rdev->dev, "%p pin failed\n", bo);
256 false, false);
257 radeon_object_gpu_addr(robj);
258 if (gpu_addr != NULL) {
259 *gpu_addr = robj->gpu_addr;
260 }
261 robj->pin_count = 1;
262 if (unlikely(r != 0)) {
263 DRM_ERROR("radeon: failed to pin object.\n");
264 }
265 radeon_object_unreserve(robj);
266 return r; 210 return r;
267} 211}
268 212
269void radeon_object_unpin(struct radeon_object *robj) 213int radeon_bo_unpin(struct radeon_bo *bo)
270{ 214{
271 uint32_t flags; 215 int r, i;
272 int r;
273 216
274 spin_lock(&robj->tobj.lock); 217 if (!bo->pin_count) {
275 if (!robj->pin_count) { 218 dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
276 spin_unlock(&robj->tobj.lock); 219 return 0;
277 printk(KERN_WARNING "Unpin not necessary for %p !\n", robj);
278 return;
279 }
280 robj->pin_count--;
281 if (robj->pin_count) {
282 spin_unlock(&robj->tobj.lock);
283 return;
284 }
285 spin_unlock(&robj->tobj.lock);
286 r = radeon_object_reserve(robj, false);
287 if (unlikely(r != 0)) {
288 DRM_ERROR("radeon: failed to reserve object for unpinning it.\n");
289 return;
290 }
291 flags = robj->tobj.mem.placement;
292 robj->tobj.proposed_placement = flags & ~TTM_PL_FLAG_NO_EVICT;
293 r = ttm_buffer_object_validate(&robj->tobj,
294 robj->tobj.proposed_placement,
295 false, false);
296 if (unlikely(r != 0)) {
297 DRM_ERROR("radeon: failed to unpin buffer.\n");
298 }
299 radeon_object_unreserve(robj);
300}
301
302int radeon_object_wait(struct radeon_object *robj)
303{
304 int r = 0;
305
306 /* FIXME: should use block reservation instead */
307 r = radeon_object_reserve(robj, true);
308 if (unlikely(r != 0)) {
309 DRM_ERROR("radeon: failed to reserve object for waiting.\n");
310 return r;
311 }
312 spin_lock(&robj->tobj.lock);
313 if (robj->tobj.sync_obj) {
314 r = ttm_bo_wait(&robj->tobj, true, true, false);
315 }
316 spin_unlock(&robj->tobj.lock);
317 radeon_object_unreserve(robj);
318 return r;
319}
320
321int radeon_object_busy_domain(struct radeon_object *robj, uint32_t *cur_placement)
322{
323 int r = 0;
324
325 r = radeon_object_reserve(robj, true);
326 if (unlikely(r != 0)) {
327 DRM_ERROR("radeon: failed to reserve object for waiting.\n");
328 return r;
329 }
330 spin_lock(&robj->tobj.lock);
331 *cur_placement = robj->tobj.mem.mem_type;
332 if (robj->tobj.sync_obj) {
333 r = ttm_bo_wait(&robj->tobj, true, true, true);
334 } 220 }
335 spin_unlock(&robj->tobj.lock); 221 bo->pin_count--;
336 radeon_object_unreserve(robj); 222 if (bo->pin_count)
223 return 0;
224 for (i = 0; i < bo->placement.num_placement; i++)
225 bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
226 r = ttm_buffer_object_validate(&bo->tbo, &bo->placement, false, false);
227 if (unlikely(r != 0))
228 dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
337 return r; 229 return r;
338} 230}
339 231
340int radeon_object_evict_vram(struct radeon_device *rdev) 232int radeon_bo_evict_vram(struct radeon_device *rdev)
341{ 233{
342 if (rdev->flags & RADEON_IS_IGP) { 234 if (rdev->flags & RADEON_IS_IGP) {
343 /* Useless to evict on IGP chips */ 235 /* Useless to evict on IGP chips */
@@ -346,30 +238,32 @@ int radeon_object_evict_vram(struct radeon_device *rdev)
346 return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM); 238 return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
347} 239}
348 240
349void radeon_object_force_delete(struct radeon_device *rdev) 241void radeon_bo_force_delete(struct radeon_device *rdev)
350{ 242{
351 struct radeon_object *robj, *n; 243 struct radeon_bo *bo, *n;
352 struct drm_gem_object *gobj; 244 struct drm_gem_object *gobj;
353 245
354 if (list_empty(&rdev->gem.objects)) { 246 if (list_empty(&rdev->gem.objects)) {
355 return; 247 return;
356 } 248 }
 357 DRM_ERROR("Userspace still has active objects!\n"); 249 dev_err(rdev->dev, "Userspace still has active objects!\n");
358 list_for_each_entry_safe(robj, n, &rdev->gem.objects, list) { 250 list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
359 mutex_lock(&rdev->ddev->struct_mutex); 251 mutex_lock(&rdev->ddev->struct_mutex);
360 gobj = robj->gobj; 252 gobj = bo->gobj;
361 DRM_ERROR("Force free for (%p,%p,%lu,%lu)\n", 253 dev_err(rdev->dev, "%p %p %lu %lu force free\n",
362 gobj, robj, (unsigned long)gobj->size, 254 gobj, bo, (unsigned long)gobj->size,
363 *((unsigned long *)&gobj->refcount)); 255 *((unsigned long *)&gobj->refcount));
364 list_del_init(&robj->list); 256 mutex_lock(&bo->rdev->gem.mutex);
365 radeon_object_unref(&robj); 257 list_del_init(&bo->list);
258 mutex_unlock(&bo->rdev->gem.mutex);
259 radeon_bo_unref(&bo);
366 gobj->driver_private = NULL; 260 gobj->driver_private = NULL;
367 drm_gem_object_unreference(gobj); 261 drm_gem_object_unreference(gobj);
368 mutex_unlock(&rdev->ddev->struct_mutex); 262 mutex_unlock(&rdev->ddev->struct_mutex);
369 } 263 }
370} 264}
371 265
372int radeon_object_init(struct radeon_device *rdev) 266int radeon_bo_init(struct radeon_device *rdev)
373{ 267{
374 /* Add an MTRR for the VRAM */ 268 /* Add an MTRR for the VRAM */
375 rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size, 269 rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size,
@@ -382,13 +276,13 @@ int radeon_object_init(struct radeon_device *rdev)
382 return radeon_ttm_init(rdev); 276 return radeon_ttm_init(rdev);
383} 277}
384 278
385void radeon_object_fini(struct radeon_device *rdev) 279void radeon_bo_fini(struct radeon_device *rdev)
386{ 280{
387 radeon_ttm_fini(rdev); 281 radeon_ttm_fini(rdev);
388} 282}
389 283
390void radeon_object_list_add_object(struct radeon_object_list *lobj, 284void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
391 struct list_head *head) 285 struct list_head *head)
392{ 286{
393 if (lobj->wdomain) { 287 if (lobj->wdomain) {
394 list_add(&lobj->list, head); 288 list_add(&lobj->list, head);
@@ -397,72 +291,63 @@ void radeon_object_list_add_object(struct radeon_object_list *lobj,
397 } 291 }
398} 292}
399 293
400int radeon_object_list_reserve(struct list_head *head) 294int radeon_bo_list_reserve(struct list_head *head)
401{ 295{
402 struct radeon_object_list *lobj; 296 struct radeon_bo_list *lobj;
403 int r; 297 int r;
404 298
405 list_for_each_entry(lobj, head, list){ 299 list_for_each_entry(lobj, head, list){
406 if (!lobj->robj->pin_count) { 300 r = radeon_bo_reserve(lobj->bo, false);
407 r = radeon_object_reserve(lobj->robj, true); 301 if (unlikely(r != 0))
408 if (unlikely(r != 0)) { 302 return r;
409 DRM_ERROR("radeon: failed to reserve object.\n");
410 return r;
411 }
412 } else {
413 }
414 } 303 }
415 return 0; 304 return 0;
416} 305}
417 306
418void radeon_object_list_unreserve(struct list_head *head) 307void radeon_bo_list_unreserve(struct list_head *head)
419{ 308{
420 struct radeon_object_list *lobj; 309 struct radeon_bo_list *lobj;
421 310
422 list_for_each_entry(lobj, head, list) { 311 list_for_each_entry(lobj, head, list) {
 423 if (!lobj->robj->pin_count) { 312 /* only unreserve objects we successfully reserved */
424 radeon_object_unreserve(lobj->robj); 313 if (radeon_bo_is_reserved(lobj->bo))
425 } 314 radeon_bo_unreserve(lobj->bo);
426 } 315 }
427} 316}
428 317
429int radeon_object_list_validate(struct list_head *head, void *fence) 318int radeon_bo_list_validate(struct list_head *head, void *fence)
430{ 319{
431 struct radeon_object_list *lobj; 320 struct radeon_bo_list *lobj;
432 struct radeon_object *robj; 321 struct radeon_bo *bo;
433 struct radeon_fence *old_fence = NULL; 322 struct radeon_fence *old_fence = NULL;
434 int r; 323 int r;
435 324
436 r = radeon_object_list_reserve(head); 325 r = radeon_bo_list_reserve(head);
437 if (unlikely(r != 0)) { 326 if (unlikely(r != 0)) {
438 radeon_object_list_unreserve(head);
439 return r; 327 return r;
440 } 328 }
441 list_for_each_entry(lobj, head, list) { 329 list_for_each_entry(lobj, head, list) {
442 robj = lobj->robj; 330 bo = lobj->bo;
443 if (!robj->pin_count) { 331 if (!bo->pin_count) {
444 if (lobj->wdomain) { 332 if (lobj->wdomain) {
445 robj->tobj.proposed_placement = 333 radeon_ttm_placement_from_domain(bo,
446 radeon_object_flags_from_domain(lobj->wdomain); 334 lobj->wdomain);
447 } else { 335 } else {
448 robj->tobj.proposed_placement = 336 radeon_ttm_placement_from_domain(bo,
449 radeon_object_flags_from_domain(lobj->rdomain); 337 lobj->rdomain);
450 } 338 }
451 r = ttm_buffer_object_validate(&robj->tobj, 339 r = ttm_buffer_object_validate(&bo->tbo,
452 robj->tobj.proposed_placement, 340 &bo->placement,
453 true, false); 341 true, false);
454 if (unlikely(r)) { 342 if (unlikely(r))
455 DRM_ERROR("radeon: failed to validate.\n");
456 return r; 343 return r;
457 }
458 radeon_object_gpu_addr(robj);
459 } 344 }
460 lobj->gpu_offset = robj->gpu_addr; 345 lobj->gpu_offset = radeon_bo_gpu_offset(bo);
461 lobj->tiling_flags = robj->tiling_flags; 346 lobj->tiling_flags = bo->tiling_flags;
462 if (fence) { 347 if (fence) {
463 old_fence = (struct radeon_fence *)robj->tobj.sync_obj; 348 old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
464 robj->tobj.sync_obj = radeon_fence_ref(fence); 349 bo->tbo.sync_obj = radeon_fence_ref(fence);
465 robj->tobj.sync_obj_arg = NULL; 350 bo->tbo.sync_obj_arg = NULL;
466 } 351 }
467 if (old_fence) { 352 if (old_fence) {
468 radeon_fence_unref(&old_fence); 353 radeon_fence_unref(&old_fence);
@@ -471,51 +356,44 @@ int radeon_object_list_validate(struct list_head *head, void *fence)
471 return 0; 356 return 0;
472} 357}
473 358
474void radeon_object_list_unvalidate(struct list_head *head) 359void radeon_bo_list_unvalidate(struct list_head *head, void *fence)
475{ 360{
476 struct radeon_object_list *lobj; 361 struct radeon_bo_list *lobj;
477 struct radeon_fence *old_fence = NULL; 362 struct radeon_fence *old_fence;
478 363
479 list_for_each_entry(lobj, head, list) { 364 if (fence)
480 old_fence = (struct radeon_fence *)lobj->robj->tobj.sync_obj; 365 list_for_each_entry(lobj, head, list) {
481 lobj->robj->tobj.sync_obj = NULL; 366 old_fence = to_radeon_fence(lobj->bo->tbo.sync_obj);
482 if (old_fence) { 367 if (old_fence == fence) {
483 radeon_fence_unref(&old_fence); 368 lobj->bo->tbo.sync_obj = NULL;
369 radeon_fence_unref(&old_fence);
370 }
484 } 371 }
485 } 372 radeon_bo_list_unreserve(head);
486 radeon_object_list_unreserve(head);
487}
488
489void radeon_object_list_clean(struct list_head *head)
490{
491 radeon_object_list_unreserve(head);
492} 373}
493 374
494int radeon_object_fbdev_mmap(struct radeon_object *robj, 375int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
495 struct vm_area_struct *vma) 376 struct vm_area_struct *vma)
496{ 377{
497 return ttm_fbdev_mmap(vma, &robj->tobj); 378 return ttm_fbdev_mmap(vma, &bo->tbo);
498} 379}
499 380
500unsigned long radeon_object_size(struct radeon_object *robj) 381int radeon_bo_get_surface_reg(struct radeon_bo *bo)
501{ 382{
502 return robj->tobj.num_pages << PAGE_SHIFT; 383 struct radeon_device *rdev = bo->rdev;
503}
504
505int radeon_object_get_surface_reg(struct radeon_object *robj)
506{
507 struct radeon_device *rdev = robj->rdev;
508 struct radeon_surface_reg *reg; 384 struct radeon_surface_reg *reg;
509 struct radeon_object *old_object; 385 struct radeon_bo *old_object;
510 int steal; 386 int steal;
511 int i; 387 int i;
512 388
513 if (!robj->tiling_flags) 389 BUG_ON(!atomic_read(&bo->tbo.reserved));
390
391 if (!bo->tiling_flags)
514 return 0; 392 return 0;
515 393
516 if (robj->surface_reg >= 0) { 394 if (bo->surface_reg >= 0) {
517 reg = &rdev->surface_regs[robj->surface_reg]; 395 reg = &rdev->surface_regs[bo->surface_reg];
518 i = robj->surface_reg; 396 i = bo->surface_reg;
519 goto out; 397 goto out;
520 } 398 }
521 399
@@ -523,10 +401,10 @@ int radeon_object_get_surface_reg(struct radeon_object *robj)
523 for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) { 401 for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
524 402
525 reg = &rdev->surface_regs[i]; 403 reg = &rdev->surface_regs[i];
526 if (!reg->robj) 404 if (!reg->bo)
527 break; 405 break;
528 406
529 old_object = reg->robj; 407 old_object = reg->bo;
530 if (old_object->pin_count == 0) 408 if (old_object->pin_count == 0)
531 steal = i; 409 steal = i;
532 } 410 }
@@ -537,91 +415,101 @@ int radeon_object_get_surface_reg(struct radeon_object *robj)
537 return -ENOMEM; 415 return -ENOMEM;
538 /* find someone with a surface reg and nuke their BO */ 416 /* find someone with a surface reg and nuke their BO */
539 reg = &rdev->surface_regs[steal]; 417 reg = &rdev->surface_regs[steal];
540 old_object = reg->robj; 418 old_object = reg->bo;
541 /* blow away the mapping */ 419 /* blow away the mapping */
542 DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object); 420 DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
543 ttm_bo_unmap_virtual(&old_object->tobj); 421 ttm_bo_unmap_virtual(&old_object->tbo);
544 old_object->surface_reg = -1; 422 old_object->surface_reg = -1;
545 i = steal; 423 i = steal;
546 } 424 }
547 425
548 robj->surface_reg = i; 426 bo->surface_reg = i;
549 reg->robj = robj; 427 reg->bo = bo;
550 428
551out: 429out:
552 radeon_set_surface_reg(rdev, i, robj->tiling_flags, robj->pitch, 430 radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
553 robj->tobj.mem.mm_node->start << PAGE_SHIFT, 431 bo->tbo.mem.mm_node->start << PAGE_SHIFT,
554 robj->tobj.num_pages << PAGE_SHIFT); 432 bo->tbo.num_pages << PAGE_SHIFT);
555 return 0; 433 return 0;
556} 434}
557 435
558void radeon_object_clear_surface_reg(struct radeon_object *robj) 436static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
559{ 437{
560 struct radeon_device *rdev = robj->rdev; 438 struct radeon_device *rdev = bo->rdev;
561 struct radeon_surface_reg *reg; 439 struct radeon_surface_reg *reg;
562 440
563 if (robj->surface_reg == -1) 441 if (bo->surface_reg == -1)
564 return; 442 return;
565 443
566 reg = &rdev->surface_regs[robj->surface_reg]; 444 reg = &rdev->surface_regs[bo->surface_reg];
567 radeon_clear_surface_reg(rdev, robj->surface_reg); 445 radeon_clear_surface_reg(rdev, bo->surface_reg);
568 446
569 reg->robj = NULL; 447 reg->bo = NULL;
570 robj->surface_reg = -1; 448 bo->surface_reg = -1;
571} 449}
572 450
573void radeon_object_set_tiling_flags(struct radeon_object *robj, 451int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
574 uint32_t tiling_flags, uint32_t pitch) 452 uint32_t tiling_flags, uint32_t pitch)
575{ 453{
576 robj->tiling_flags = tiling_flags; 454 int r;
577 robj->pitch = pitch; 455
456 r = radeon_bo_reserve(bo, false);
457 if (unlikely(r != 0))
458 return r;
459 bo->tiling_flags = tiling_flags;
460 bo->pitch = pitch;
461 radeon_bo_unreserve(bo);
462 return 0;
578} 463}
579 464
580void radeon_object_get_tiling_flags(struct radeon_object *robj, 465void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
581 uint32_t *tiling_flags, 466 uint32_t *tiling_flags,
582 uint32_t *pitch) 467 uint32_t *pitch)
583{ 468{
469 BUG_ON(!atomic_read(&bo->tbo.reserved));
584 if (tiling_flags) 470 if (tiling_flags)
585 *tiling_flags = robj->tiling_flags; 471 *tiling_flags = bo->tiling_flags;
586 if (pitch) 472 if (pitch)
587 *pitch = robj->pitch; 473 *pitch = bo->pitch;
588} 474}
589 475
590int radeon_object_check_tiling(struct radeon_object *robj, bool has_moved, 476int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
591 bool force_drop) 477 bool force_drop)
592{ 478{
593 if (!(robj->tiling_flags & RADEON_TILING_SURFACE)) 479 BUG_ON(!atomic_read(&bo->tbo.reserved));
480
481 if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
594 return 0; 482 return 0;
595 483
596 if (force_drop) { 484 if (force_drop) {
597 radeon_object_clear_surface_reg(robj); 485 radeon_bo_clear_surface_reg(bo);
598 return 0; 486 return 0;
599 } 487 }
600 488
601 if (robj->tobj.mem.mem_type != TTM_PL_VRAM) { 489 if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
602 if (!has_moved) 490 if (!has_moved)
603 return 0; 491 return 0;
604 492
605 if (robj->surface_reg >= 0) 493 if (bo->surface_reg >= 0)
606 radeon_object_clear_surface_reg(robj); 494 radeon_bo_clear_surface_reg(bo);
607 return 0; 495 return 0;
608 } 496 }
609 497
610 if ((robj->surface_reg >= 0) && !has_moved) 498 if ((bo->surface_reg >= 0) && !has_moved)
611 return 0; 499 return 0;
612 500
613 return radeon_object_get_surface_reg(robj); 501 return radeon_bo_get_surface_reg(bo);
614} 502}
615 503
616void radeon_bo_move_notify(struct ttm_buffer_object *bo, 504void radeon_bo_move_notify(struct ttm_buffer_object *bo,
617 struct ttm_mem_reg *mem) 505 struct ttm_mem_reg *mem)
618{ 506{
619 struct radeon_object *robj = container_of(bo, struct radeon_object, tobj); 507 struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
620 radeon_object_check_tiling(robj, 0, 1); 508 radeon_bo_check_tiling(rbo, 0, 1);
621} 509}
622 510
623void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) 511void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
624{ 512{
625 struct radeon_object *robj = container_of(bo, struct radeon_object, tobj); 513 struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
626 radeon_object_check_tiling(robj, 0, 0); 514 radeon_bo_check_tiling(rbo, 0, 0);
627} 515}
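
After this rework the caller owns the reservation: pin, unpin and the tiling helpers now assume (and in several places BUG_ON) a reserved object instead of taking the reservation themselves. The expected calling pattern is a sketch like the following; the radeon_ring.c hunk further down shows a real instance:

	int r;
	u64 gpu_addr;

	r = radeon_bo_reserve(bo, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
	radeon_bo_unreserve(bo);
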
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 10e8af6bb456..f6b69c2c0d00 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -28,19 +28,152 @@
28#ifndef __RADEON_OBJECT_H__ 28#ifndef __RADEON_OBJECT_H__
29#define __RADEON_OBJECT_H__ 29#define __RADEON_OBJECT_H__
30 30
31#include <ttm/ttm_bo_api.h> 31#include <drm/radeon_drm.h>
32#include <ttm/ttm_bo_driver.h> 32#include "radeon.h"
33#include <ttm/ttm_placement.h>
34#include <ttm/ttm_module.h>
35 33
36/* 34/**
37 * TTM. 35 * radeon_mem_type_to_domain - return domain corresponding to mem_type
36 * @mem_type: ttm memory type
37 *
38 * Returns corresponding domain of the ttm mem_type
39 */
40static inline unsigned radeon_mem_type_to_domain(u32 mem_type)
41{
42 switch (mem_type) {
43 case TTM_PL_VRAM:
44 return RADEON_GEM_DOMAIN_VRAM;
45 case TTM_PL_TT:
46 return RADEON_GEM_DOMAIN_GTT;
47 case TTM_PL_SYSTEM:
48 return RADEON_GEM_DOMAIN_CPU;
49 default:
50 break;
51 }
52 return 0;
53}
54
55/**
56 * radeon_bo_reserve - reserve bo
57 * @bo: bo structure
58 * @no_wait: don't sleep while trying to reserve (return -EBUSY)
59 *
60 * Returns:
61 * -EBUSY: buffer is busy and @no_wait is true
62 * -ERESTART: A wait for the buffer to become unreserved was interrupted by
63 * a signal. Release all buffer reservations and return to user-space.
38 */ 64 */
39struct radeon_mman { 65static inline int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait)
40 struct ttm_bo_global_ref bo_global_ref; 66{
41 struct ttm_global_reference mem_global_ref; 67 int r;
42 bool mem_global_referenced; 68
43 struct ttm_bo_device bdev; 69retry:
44}; 70 r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
71 if (unlikely(r != 0)) {
72 if (r == -ERESTART)
73 goto retry;
74 dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
75 return r;
76 }
77 return 0;
78}
79
80static inline void radeon_bo_unreserve(struct radeon_bo *bo)
81{
82 ttm_bo_unreserve(&bo->tbo);
83}
84
85/**
86 * radeon_bo_gpu_offset - return GPU offset of bo
87 * @bo: radeon object for which we query the offset
88 *
89 * Returns current GPU offset of the object.
90 *
 91 * Note: the object should be either pinned or reserved when calling this
 92 * function; it might be useful to add a check for this for debugging.
93 */
94static inline u64 radeon_bo_gpu_offset(struct radeon_bo *bo)
95{
96 return bo->tbo.offset;
97}
98
99static inline unsigned long radeon_bo_size(struct radeon_bo *bo)
100{
101 return bo->tbo.num_pages << PAGE_SHIFT;
102}
103
104static inline bool radeon_bo_is_reserved(struct radeon_bo *bo)
105{
106 return !!atomic_read(&bo->tbo.reserved);
107}
108
109/**
110 * radeon_bo_mmap_offset - return mmap offset of bo
111 * @bo: radeon object for which we query the offset
112 *
113 * Returns mmap offset of the object.
114 *
 115 * Note: addr_space_offset is constant after ttm bo init and thus isn't protected
116 * by any lock.
117 */
118static inline u64 radeon_bo_mmap_offset(struct radeon_bo *bo)
119{
120 return bo->tbo.addr_space_offset;
121}
122
123static inline int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
124 bool no_wait)
125{
126 int r;
127
128retry:
129 r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
130 if (unlikely(r != 0)) {
131 if (r == -ERESTART)
132 goto retry;
133 dev_err(bo->rdev->dev, "%p reserve failed for wait\n", bo);
134 return r;
135 }
136 spin_lock(&bo->tbo.lock);
137 if (mem_type)
138 *mem_type = bo->tbo.mem.mem_type;
139 if (bo->tbo.sync_obj)
140 r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
141 spin_unlock(&bo->tbo.lock);
142 ttm_bo_unreserve(&bo->tbo);
143 if (unlikely(r == -ERESTART))
144 goto retry;
145 return r;
146}
45 147
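
radeon_bo_wait also subsumes the removed radeon_object_busy_domain: called with no_wait = true and a mem_type pointer, it reports the current placement without blocking, returning -EBUSY while a fence is outstanding. Roughly (sketch):

	u32 cur_placement = 0;
	int r;

	r = radeon_bo_wait(bo, &cur_placement, true);	/* non-blocking probe */
	/* r == -EBUSY: object still busy; cur_placement holds its mem_type */
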
148extern int radeon_bo_create(struct radeon_device *rdev,
149 struct drm_gem_object *gobj, unsigned long size,
150 bool kernel, u32 domain,
151 struct radeon_bo **bo_ptr);
152extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
153extern void radeon_bo_kunmap(struct radeon_bo *bo);
154extern void radeon_bo_unref(struct radeon_bo **bo);
155extern int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr);
156extern int radeon_bo_unpin(struct radeon_bo *bo);
157extern int radeon_bo_evict_vram(struct radeon_device *rdev);
158extern void radeon_bo_force_delete(struct radeon_device *rdev);
159extern int radeon_bo_init(struct radeon_device *rdev);
160extern void radeon_bo_fini(struct radeon_device *rdev);
161extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
162 struct list_head *head);
163extern int radeon_bo_list_reserve(struct list_head *head);
164extern void radeon_bo_list_unreserve(struct list_head *head);
165extern int radeon_bo_list_validate(struct list_head *head, void *fence);
166extern void radeon_bo_list_unvalidate(struct list_head *head, void *fence);
167extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
168 struct vm_area_struct *vma);
169extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
170 u32 tiling_flags, u32 pitch);
171extern void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
172 u32 *tiling_flags, u32 *pitch);
173extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
174 bool force_drop);
175extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
176 struct ttm_mem_reg *mem);
177extern void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
178extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
46#endif 179#endif
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 46146c6a2a06..34b08d307c81 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -27,7 +27,7 @@ int radeon_debugfs_pm_init(struct radeon_device *rdev);
27int radeon_pm_init(struct radeon_device *rdev) 27int radeon_pm_init(struct radeon_device *rdev)
28{ 28{
29 if (radeon_debugfs_pm_init(rdev)) { 29 if (radeon_debugfs_pm_init(rdev)) {
30 DRM_ERROR("Failed to register debugfs file for CP !\n"); 30 DRM_ERROR("Failed to register debugfs file for PM!\n");
31 } 31 }
32 32
33 return 0; 33 return 0;
@@ -44,8 +44,8 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
44 struct drm_device *dev = node->minor->dev; 44 struct drm_device *dev = node->minor->dev;
45 struct radeon_device *rdev = dev->dev_private; 45 struct radeon_device *rdev = dev->dev_private;
46 46
47 seq_printf(m, "engine clock: %u0 Hz\n", radeon_get_engine_clock(rdev)); 47 seq_printf(m, "engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
48 seq_printf(m, "memory clock: %u0 Hz\n", radeon_get_memory_clock(rdev)); 48 seq_printf(m, "memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
49 49
50 return 0; 50 return 0;
51} 51}
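
The unit change is not cosmetic: these getters evidently return the clock in units of 10 kHz, and the trailing zero in the "%u0" format string multiplies the printed number by ten in decimal, so the displayed value is in kHz, not Hz. For example:

	/* radeon_get_engine_clock(rdev) == 30000 (10 kHz units)
	 * prints "engine clock: 300000 kHz", i.e. 300 MHz
	 */
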
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
index 29ab75903ec1..6d0a009dd4a1 100644
--- a/drivers/gpu/drm/radeon/radeon_reg.h
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
@@ -887,6 +887,7 @@
887# define RADEON_FP_PANEL_FORMAT (1 << 3) 887# define RADEON_FP_PANEL_FORMAT (1 << 3)
888# define RADEON_FP_EN_TMDS (1 << 7) 888# define RADEON_FP_EN_TMDS (1 << 7)
889# define RADEON_FP_DETECT_SENSE (1 << 8) 889# define RADEON_FP_DETECT_SENSE (1 << 8)
890# define RADEON_FP_DETECT_INT_POL (1 << 9)
890# define R200_FP_SOURCE_SEL_MASK (3 << 10) 891# define R200_FP_SOURCE_SEL_MASK (3 << 10)
891# define R200_FP_SOURCE_SEL_CRTC1 (0 << 10) 892# define R200_FP_SOURCE_SEL_CRTC1 (0 << 10)
892# define R200_FP_SOURCE_SEL_CRTC2 (1 << 10) 893# define R200_FP_SOURCE_SEL_CRTC2 (1 << 10)
@@ -894,6 +895,7 @@
894# define R200_FP_SOURCE_SEL_TRANS (3 << 10) 895# define R200_FP_SOURCE_SEL_TRANS (3 << 10)
895# define RADEON_FP_SEL_CRTC1 (0 << 13) 896# define RADEON_FP_SEL_CRTC1 (0 << 13)
896# define RADEON_FP_SEL_CRTC2 (1 << 13) 897# define RADEON_FP_SEL_CRTC2 (1 << 13)
898# define R300_HPD_SEL(x) ((x) << 13)
897# define RADEON_FP_CRTC_DONT_SHADOW_HPAR (1 << 15) 899# define RADEON_FP_CRTC_DONT_SHADOW_HPAR (1 << 15)
898# define RADEON_FP_CRTC_DONT_SHADOW_VPAR (1 << 16) 900# define RADEON_FP_CRTC_DONT_SHADOW_VPAR (1 << 16)
899# define RADEON_FP_CRTC_DONT_SHADOW_HEND (1 << 17) 901# define RADEON_FP_CRTC_DONT_SHADOW_HEND (1 << 17)
@@ -909,6 +911,7 @@
909# define RADEON_FP2_ON (1 << 2) 911# define RADEON_FP2_ON (1 << 2)
910# define RADEON_FP2_PANEL_FORMAT (1 << 3) 912# define RADEON_FP2_PANEL_FORMAT (1 << 3)
911# define RADEON_FP2_DETECT_SENSE (1 << 8) 913# define RADEON_FP2_DETECT_SENSE (1 << 8)
914# define RADEON_FP2_DETECT_INT_POL (1 << 9)
912# define R200_FP2_SOURCE_SEL_MASK (3 << 10) 915# define R200_FP2_SOURCE_SEL_MASK (3 << 10)
913# define R200_FP2_SOURCE_SEL_CRTC1 (0 << 10) 916# define R200_FP2_SOURCE_SEL_CRTC1 (0 << 10)
914# define R200_FP2_SOURCE_SEL_CRTC2 (1 << 10) 917# define R200_FP2_SOURCE_SEL_CRTC2 (1 << 10)
@@ -988,14 +991,20 @@
988 991
989#define RADEON_GEN_INT_CNTL 0x0040 992#define RADEON_GEN_INT_CNTL 0x0040
990# define RADEON_CRTC_VBLANK_MASK (1 << 0) 993# define RADEON_CRTC_VBLANK_MASK (1 << 0)
994# define RADEON_FP_DETECT_MASK (1 << 4)
991# define RADEON_CRTC2_VBLANK_MASK (1 << 9) 995# define RADEON_CRTC2_VBLANK_MASK (1 << 9)
996# define RADEON_FP2_DETECT_MASK (1 << 10)
992# define RADEON_SW_INT_ENABLE (1 << 25) 997# define RADEON_SW_INT_ENABLE (1 << 25)
993#define RADEON_GEN_INT_STATUS 0x0044 998#define RADEON_GEN_INT_STATUS 0x0044
994# define AVIVO_DISPLAY_INT_STATUS (1 << 0) 999# define AVIVO_DISPLAY_INT_STATUS (1 << 0)
995# define RADEON_CRTC_VBLANK_STAT (1 << 0) 1000# define RADEON_CRTC_VBLANK_STAT (1 << 0)
996# define RADEON_CRTC_VBLANK_STAT_ACK (1 << 0) 1001# define RADEON_CRTC_VBLANK_STAT_ACK (1 << 0)
1002# define RADEON_FP_DETECT_STAT (1 << 4)
1003# define RADEON_FP_DETECT_STAT_ACK (1 << 4)
997# define RADEON_CRTC2_VBLANK_STAT (1 << 9) 1004# define RADEON_CRTC2_VBLANK_STAT (1 << 9)
998# define RADEON_CRTC2_VBLANK_STAT_ACK (1 << 9) 1005# define RADEON_CRTC2_VBLANK_STAT_ACK (1 << 9)
1006# define RADEON_FP2_DETECT_STAT (1 << 10)
1007# define RADEON_FP2_DETECT_STAT_ACK (1 << 10)
999# define RADEON_SW_INT_FIRE (1 << 26) 1008# define RADEON_SW_INT_FIRE (1 << 26)
1000# define RADEON_SW_INT_TEST (1 << 25) 1009# define RADEON_SW_INT_TEST (1 << 25)
1001# define RADEON_SW_INT_TEST_ACK (1 << 25) 1010# define RADEON_SW_INT_TEST_ACK (1 << 25)
@@ -1051,20 +1060,25 @@
1051 1060
1052 /* Multimedia I2C bus */ 1061 /* Multimedia I2C bus */
1053#define RADEON_I2C_CNTL_0 0x0090 1062#define RADEON_I2C_CNTL_0 0x0090
1054#define RADEON_I2C_DONE (1<<0) 1063#define RADEON_I2C_DONE (1 << 0)
1055#define RADEON_I2C_NACK (1<<1) 1064#define RADEON_I2C_NACK (1 << 1)
1056#define RADEON_I2C_HALT (1<<2) 1065#define RADEON_I2C_HALT (1 << 2)
1057#define RADEON_I2C_SOFT_RST (1<<5) 1066#define RADEON_I2C_SOFT_RST (1 << 5)
1058#define RADEON_I2C_DRIVE_EN (1<<6) 1067#define RADEON_I2C_DRIVE_EN (1 << 6)
1059#define RADEON_I2C_DRIVE_SEL (1<<7) 1068#define RADEON_I2C_DRIVE_SEL (1 << 7)
1060#define RADEON_I2C_START (1<<8) 1069#define RADEON_I2C_START (1 << 8)
1061#define RADEON_I2C_STOP (1<<9) 1070#define RADEON_I2C_STOP (1 << 9)
1062#define RADEON_I2C_RECEIVE (1<<10) 1071#define RADEON_I2C_RECEIVE (1 << 10)
1063#define RADEON_I2C_ABORT (1<<11) 1072#define RADEON_I2C_ABORT (1 << 11)
1064#define RADEON_I2C_GO (1<<12) 1073#define RADEON_I2C_GO (1 << 12)
1074#define RADEON_I2C_PRESCALE_SHIFT 16
1065#define RADEON_I2C_CNTL_1 0x0094 1075#define RADEON_I2C_CNTL_1 0x0094
1066#define RADEON_I2C_SEL (1<<16) 1076#define RADEON_I2C_DATA_COUNT_SHIFT 0
1067#define RADEON_I2C_EN (1<<17) 1077#define RADEON_I2C_ADDR_COUNT_SHIFT 4
1078#define RADEON_I2C_INTRA_BYTE_DELAY_SHIFT 8
1079#define RADEON_I2C_SEL (1 << 16)
1080#define RADEON_I2C_EN (1 << 17)
1081#define RADEON_I2C_TIME_LIMIT_SHIFT 24
1068#define RADEON_I2C_DATA 0x0098 1082#define RADEON_I2C_DATA 0x0098
1069 1083
1070#define RADEON_DVI_I2C_CNTL_0 0x02e0 1084#define RADEON_DVI_I2C_CNTL_0 0x02e0
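
With the new shift definitions a caller can compose RADEON_I2C_CNTL_1 for the hardware engine in a single expression. An illustrative setup for a one-byte write (the counts and time limit are hypothetical values, not taken from the driver; rdev assumed in scope for WREG32):

	u32 i2c_cntl_1 = (1 << RADEON_I2C_DATA_COUNT_SHIFT) |
			 (1 << RADEON_I2C_ADDR_COUNT_SHIFT) |
			 RADEON_I2C_SEL | RADEON_I2C_EN |
			 (0x30 << RADEON_I2C_TIME_LIMIT_SHIFT);

	WREG32(RADEON_I2C_CNTL_1, i2c_cntl_1);
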
@@ -1072,7 +1086,7 @@
1072# define R200_SEL_DDC1 0 /* 0x60 - VGA_DDC */ 1086# define R200_SEL_DDC1 0 /* 0x60 - VGA_DDC */
1073# define R200_SEL_DDC2 1 /* 0x64 - DVI_DDC */ 1087# define R200_SEL_DDC2 1 /* 0x64 - DVI_DDC */
1074# define R200_SEL_DDC3 2 /* 0x68 - MONID_DDC */ 1088# define R200_SEL_DDC3 2 /* 0x68 - MONID_DDC */
1075#define RADEON_DVI_I2C_CNTL_1 0x02e4 /* ? */ 1089#define RADEON_DVI_I2C_CNTL_1 0x02e4
1076#define RADEON_DVI_I2C_DATA 0x02e8 1090#define RADEON_DVI_I2C_DATA 0x02e8
1077 1091
1078#define RADEON_INTERRUPT_LINE 0x0f3c /* PCI */ 1092#define RADEON_INTERRUPT_LINE 0x0f3c /* PCI */
@@ -1143,15 +1157,16 @@
1143# define RADEON_IO_MCLK_MAX_DYN_STOP_LAT (1 << 13) 1157# define RADEON_IO_MCLK_MAX_DYN_STOP_LAT (1 << 13)
1144# define RADEON_MC_MCLK_DYN_ENABLE (1 << 14) 1158# define RADEON_MC_MCLK_DYN_ENABLE (1 << 14)
1145# define RADEON_IO_MCLK_DYN_ENABLE (1 << 15) 1159# define RADEON_IO_MCLK_DYN_ENABLE (1 << 15)
1146#define RADEON_LCD_GPIO_MASK 0x01a0 1160
1147#define RADEON_GPIOPAD_EN 0x01a0
1148#define RADEON_LCD_GPIO_Y_REG 0x01a4
1149#define RADEON_MDGPIO_A_REG 0x01ac
1150#define RADEON_MDGPIO_EN_REG 0x01b0
1151#define RADEON_MDGPIO_MASK 0x0198
1152#define RADEON_GPIOPAD_MASK 0x0198 1161#define RADEON_GPIOPAD_MASK 0x0198
1153#define RADEON_GPIOPAD_A 0x019c 1162#define RADEON_GPIOPAD_A 0x019c
1154#define RADEON_MDGPIO_Y_REG 0x01b4 1163#define RADEON_GPIOPAD_EN 0x01a0
1164#define RADEON_GPIOPAD_Y 0x01a4
1165#define RADEON_MDGPIO_MASK 0x01a8
1166#define RADEON_MDGPIO_A 0x01ac
1167#define RADEON_MDGPIO_EN 0x01b0
1168#define RADEON_MDGPIO_Y 0x01b4
1169
1155#define RADEON_MEM_ADDR_CONFIG 0x0148 1170#define RADEON_MEM_ADDR_CONFIG 0x0148
1156#define RADEON_MEM_BASE 0x0f10 /* PCI */ 1171#define RADEON_MEM_BASE 0x0f10 /* PCI */
1157#define RADEON_MEM_CNTL 0x0140 1172#define RADEON_MEM_CNTL 0x0140
@@ -1360,6 +1375,9 @@
1360#define RADEON_OVR_CLR 0x0230 1375#define RADEON_OVR_CLR 0x0230
1361#define RADEON_OVR_WID_LEFT_RIGHT 0x0234 1376#define RADEON_OVR_WID_LEFT_RIGHT 0x0234
1362#define RADEON_OVR_WID_TOP_BOTTOM 0x0238 1377#define RADEON_OVR_WID_TOP_BOTTOM 0x0238
1378#define RADEON_OVR2_CLR 0x0330
1379#define RADEON_OVR2_WID_LEFT_RIGHT 0x0334
1380#define RADEON_OVR2_WID_TOP_BOTTOM 0x0338
1363 1381
1364/* first capture unit */ 1382/* first capture unit */
1365 1383
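For orientation (not part of the commit): the new FP/FP2 detect bits above follow the existing radeon interrupt convention, an enable bit in GEN_INT_CNTL plus a status bit in GEN_INT_STATUS whose acknowledge sits at the same bit position and is cleared by writing it back. A minimal sketch of that pattern, assuming RREG32/WREG32 as used throughout this diff; the function itself is illustrative:

static void example_fp_detect_irq(struct radeon_device *rdev)
{
	u32 tmp;

	/* unmask the FP detect interrupt */
	tmp = RREG32(RADEON_GEN_INT_CNTL);
	WREG32(RADEON_GEN_INT_CNTL, tmp | RADEON_FP_DETECT_MASK);

	/* later, in the handler: test the status bit, then write the
	 * same bit back to acknowledge it (write-one-to-clear) */
	tmp = RREG32(RADEON_GEN_INT_STATUS);
	if (tmp & RADEON_FP_DETECT_STAT)
		WREG32(RADEON_GEN_INT_STATUS, RADEON_FP_DETECT_STAT_ACK);
}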
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 747b4bffb84b..4d12b2d17b4d 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -165,19 +165,24 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
 		return 0;
 	/* Allocate 1M object buffer */
 	INIT_LIST_HEAD(&rdev->ib_pool.scheduled_ibs);
-	r = radeon_object_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
-				 true, RADEON_GEM_DOMAIN_GTT,
-				 false, &rdev->ib_pool.robj);
+	r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
+			     true, RADEON_GEM_DOMAIN_GTT,
+			     &rdev->ib_pool.robj);
 	if (r) {
 		DRM_ERROR("radeon: failed to ib pool (%d).\n", r);
 		return r;
 	}
-	r = radeon_object_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
+	r = radeon_bo_reserve(rdev->ib_pool.robj, false);
+	if (unlikely(r != 0))
+		return r;
+	r = radeon_bo_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
 	if (r) {
+		radeon_bo_unreserve(rdev->ib_pool.robj);
 		DRM_ERROR("radeon: failed to pin ib pool (%d).\n", r);
 		return r;
 	}
-	r = radeon_object_kmap(rdev->ib_pool.robj, &ptr);
+	r = radeon_bo_kmap(rdev->ib_pool.robj, &ptr);
+	radeon_bo_unreserve(rdev->ib_pool.robj);
 	if (r) {
 		DRM_ERROR("radeon: failed to map ib poll (%d).\n", r);
 		return r;
@@ -203,14 +208,21 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
 
 void radeon_ib_pool_fini(struct radeon_device *rdev)
 {
+	int r;
+
 	if (!rdev->ib_pool.ready) {
 		return;
 	}
 	mutex_lock(&rdev->ib_pool.mutex);
 	bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
 	if (rdev->ib_pool.robj) {
-		radeon_object_kunmap(rdev->ib_pool.robj);
-		radeon_object_unref(&rdev->ib_pool.robj);
+		r = radeon_bo_reserve(rdev->ib_pool.robj, false);
+		if (likely(r == 0)) {
+			radeon_bo_kunmap(rdev->ib_pool.robj);
+			radeon_bo_unpin(rdev->ib_pool.robj);
+			radeon_bo_unreserve(rdev->ib_pool.robj);
+		}
+		radeon_bo_unref(&rdev->ib_pool.robj);
 		rdev->ib_pool.robj = NULL;
 	}
 	mutex_unlock(&rdev->ib_pool.mutex);
@@ -288,29 +300,28 @@ int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
 	rdev->cp.ring_size = ring_size;
 	/* Allocate ring buffer */
 	if (rdev->cp.ring_obj == NULL) {
-		r = radeon_object_create(rdev, NULL, rdev->cp.ring_size,
-					 true,
-					 RADEON_GEM_DOMAIN_GTT,
-					 false,
-					 &rdev->cp.ring_obj);
+		r = radeon_bo_create(rdev, NULL, rdev->cp.ring_size, true,
+				     RADEON_GEM_DOMAIN_GTT,
+				     &rdev->cp.ring_obj);
 		if (r) {
-			DRM_ERROR("radeon: failed to create ring buffer (%d).\n", r);
-			mutex_unlock(&rdev->cp.mutex);
+			dev_err(rdev->dev, "(%d) ring create failed\n", r);
 			return r;
 		}
-		r = radeon_object_pin(rdev->cp.ring_obj,
-				      RADEON_GEM_DOMAIN_GTT,
-				      &rdev->cp.gpu_addr);
+		r = radeon_bo_reserve(rdev->cp.ring_obj, false);
+		if (unlikely(r != 0))
+			return r;
+		r = radeon_bo_pin(rdev->cp.ring_obj, RADEON_GEM_DOMAIN_GTT,
+				  &rdev->cp.gpu_addr);
 		if (r) {
-			DRM_ERROR("radeon: failed to pin ring buffer (%d).\n", r);
-			mutex_unlock(&rdev->cp.mutex);
+			radeon_bo_unreserve(rdev->cp.ring_obj);
+			dev_err(rdev->dev, "(%d) ring pin failed\n", r);
 			return r;
 		}
-		r = radeon_object_kmap(rdev->cp.ring_obj,
-				       (void **)&rdev->cp.ring);
+		r = radeon_bo_kmap(rdev->cp.ring_obj,
+				   (void **)&rdev->cp.ring);
+		radeon_bo_unreserve(rdev->cp.ring_obj);
 		if (r) {
-			DRM_ERROR("radeon: failed to map ring buffer (%d).\n", r);
-			mutex_unlock(&rdev->cp.mutex);
+			dev_err(rdev->dev, "(%d) ring map failed\n", r);
 			return r;
 		}
 	}
@@ -321,11 +332,17 @@ int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
 
 void radeon_ring_fini(struct radeon_device *rdev)
 {
+	int r;
+
 	mutex_lock(&rdev->cp.mutex);
 	if (rdev->cp.ring_obj) {
-		radeon_object_kunmap(rdev->cp.ring_obj);
-		radeon_object_unpin(rdev->cp.ring_obj);
-		radeon_object_unref(&rdev->cp.ring_obj);
+		r = radeon_bo_reserve(rdev->cp.ring_obj, false);
+		if (likely(r == 0)) {
+			radeon_bo_kunmap(rdev->cp.ring_obj);
+			radeon_bo_unpin(rdev->cp.ring_obj);
+			radeon_bo_unreserve(rdev->cp.ring_obj);
+		}
+		radeon_bo_unref(&rdev->cp.ring_obj);
 		rdev->cp.ring = NULL;
 		rdev->cp.ring_obj = NULL;
 	}
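For orientation (not part of the commit): the hunks above convert the ring and IB-pool buffers from the old radeon_object_* calls to the radeon_bo API, whose key difference is that pin and kmap must happen under the buffer's reservation. A minimal sketch of the new lifecycle, using only the helpers visible in this diff; the wrapper function name is illustrative:

static int example_pinned_gtt_buffer(struct radeon_device *rdev,
				     struct radeon_bo **bo,
				     uint64_t *gpu_addr, void **cpu_ptr)
{
	int r;

	r = radeon_bo_create(rdev, NULL, 64 * 1024, true,
			     RADEON_GEM_DOMAIN_GTT, bo);
	if (r)
		return r;
	/* pin and kmap now require the bo reservation to be held */
	r = radeon_bo_reserve(*bo, false);
	if (unlikely(r != 0)) {
		radeon_bo_unref(bo);
		return r;
	}
	r = radeon_bo_pin(*bo, RADEON_GEM_DOMAIN_GTT, gpu_addr);
	if (r == 0) {
		r = radeon_bo_kmap(*bo, cpu_ptr);
		if (r)
			radeon_bo_unpin(*bo);
	}
	radeon_bo_unreserve(*bo);
	if (r)
		radeon_bo_unref(bo);
	return r;
}

The teardown paths above mirror this: reserve, kunmap/unpin, unreserve, and only then drop the final reference with radeon_bo_unref().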
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index f8a465d9a1cf..391c973ec4db 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -30,8 +30,8 @@
 /* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */
 void radeon_test_moves(struct radeon_device *rdev)
 {
-	struct radeon_object *vram_obj = NULL;
-	struct radeon_object **gtt_obj = NULL;
+	struct radeon_bo *vram_obj = NULL;
+	struct radeon_bo **gtt_obj = NULL;
 	struct radeon_fence *fence = NULL;
 	uint64_t gtt_addr, vram_addr;
 	unsigned i, n, size;
@@ -52,38 +52,42 @@ void radeon_test_moves(struct radeon_device *rdev)
 		goto out_cleanup;
 	}
 
-	r = radeon_object_create(rdev, NULL, size, true, RADEON_GEM_DOMAIN_VRAM,
-				 false, &vram_obj);
+	r = radeon_bo_create(rdev, NULL, size, true, RADEON_GEM_DOMAIN_VRAM,
+			     &vram_obj);
 	if (r) {
 		DRM_ERROR("Failed to create VRAM object\n");
 		goto out_cleanup;
 	}
-
-	r = radeon_object_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr);
+	r = radeon_bo_reserve(vram_obj, false);
+	if (unlikely(r != 0))
+		goto out_cleanup;
+	r = radeon_bo_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr);
 	if (r) {
 		DRM_ERROR("Failed to pin VRAM object\n");
 		goto out_cleanup;
 	}
-
 	for (i = 0; i < n; i++) {
 		void *gtt_map, *vram_map;
 		void **gtt_start, **gtt_end;
 		void **vram_start, **vram_end;
 
-		r = radeon_object_create(rdev, NULL, size, true,
-					 RADEON_GEM_DOMAIN_GTT, false, gtt_obj + i);
+		r = radeon_bo_create(rdev, NULL, size, true,
+				     RADEON_GEM_DOMAIN_GTT, gtt_obj + i);
 		if (r) {
 			DRM_ERROR("Failed to create GTT object %d\n", i);
 			goto out_cleanup;
 		}
 
-		r = radeon_object_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, &gtt_addr);
+		r = radeon_bo_reserve(gtt_obj[i], false);
+		if (unlikely(r != 0))
+			goto out_cleanup;
+		r = radeon_bo_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, &gtt_addr);
 		if (r) {
 			DRM_ERROR("Failed to pin GTT object %d\n", i);
 			goto out_cleanup;
 		}
 
-		r = radeon_object_kmap(gtt_obj[i], &gtt_map);
+		r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
 		if (r) {
 			DRM_ERROR("Failed to map GTT object %d\n", i);
 			goto out_cleanup;
@@ -94,7 +98,7 @@ void radeon_test_moves(struct radeon_device *rdev)
 			 gtt_start++)
 			*gtt_start = gtt_start;
 
-		radeon_object_kunmap(gtt_obj[i]);
+		radeon_bo_kunmap(gtt_obj[i]);
 
 		r = radeon_fence_create(rdev, &fence);
 		if (r) {
@@ -116,7 +120,7 @@ void radeon_test_moves(struct radeon_device *rdev)
 
 		radeon_fence_unref(&fence);
 
-		r = radeon_object_kmap(vram_obj, &vram_map);
+		r = radeon_bo_kmap(vram_obj, &vram_map);
 		if (r) {
 			DRM_ERROR("Failed to map VRAM object after copy %d\n", i);
 			goto out_cleanup;
@@ -131,13 +135,13 @@ void radeon_test_moves(struct radeon_device *rdev)
 					 "expected 0x%p (GTT map 0x%p-0x%p)\n",
 					 i, *vram_start, gtt_start, gtt_map,
 					 gtt_end);
-				radeon_object_kunmap(vram_obj);
+				radeon_bo_kunmap(vram_obj);
 				goto out_cleanup;
 			}
 			*vram_start = vram_start;
 		}
 
-		radeon_object_kunmap(vram_obj);
+		radeon_bo_kunmap(vram_obj);
 
 		r = radeon_fence_create(rdev, &fence);
 		if (r) {
@@ -159,7 +163,7 @@ void radeon_test_moves(struct radeon_device *rdev)
 
 		radeon_fence_unref(&fence);
 
-		r = radeon_object_kmap(gtt_obj[i], &gtt_map);
+		r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
 		if (r) {
 			DRM_ERROR("Failed to map GTT object after copy %d\n", i);
 			goto out_cleanup;
@@ -174,12 +178,12 @@ void radeon_test_moves(struct radeon_device *rdev)
 					 "expected 0x%p (VRAM map 0x%p-0x%p)\n",
 					 i, *gtt_start, vram_start, vram_map,
 					 vram_end);
-				radeon_object_kunmap(gtt_obj[i]);
+				radeon_bo_kunmap(gtt_obj[i]);
 				goto out_cleanup;
 			}
 		}
 
-		radeon_object_kunmap(gtt_obj[i]);
+		radeon_bo_kunmap(gtt_obj[i]);
 
 		DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
 			 gtt_addr - rdev->mc.gtt_location);
@@ -187,14 +191,20 @@ void radeon_test_moves(struct radeon_device *rdev)
 
 out_cleanup:
 	if (vram_obj) {
-		radeon_object_unpin(vram_obj);
-		radeon_object_unref(&vram_obj);
+		if (radeon_bo_is_reserved(vram_obj)) {
+			radeon_bo_unpin(vram_obj);
+			radeon_bo_unreserve(vram_obj);
+		}
+		radeon_bo_unref(&vram_obj);
 	}
 	if (gtt_obj) {
 		for (i = 0; i < n; i++) {
 			if (gtt_obj[i]) {
-				radeon_object_unpin(gtt_obj[i]);
-				radeon_object_unref(&gtt_obj[i]);
+				if (radeon_bo_is_reserved(gtt_obj[i])) {
+					radeon_bo_unpin(gtt_obj[i]);
+					radeon_bo_unreserve(gtt_obj[i]);
+				}
+				radeon_bo_unref(&gtt_obj[i]);
 			}
 		}
 		kfree(gtt_obj);
@@ -206,4 +216,3 @@ out_cleanup:
 		printk(KERN_WARNING "Error while testing BO move.\n");
 	}
 }
-
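For orientation (not part of the commit): because the test keeps its buffers reserved between pin and cleanup, the shared out_cleanup label can now unwind both a fully set-up buffer and one whose setup failed midway, using radeon_bo_is_reserved() to tell the cases apart. A sketch of that cleanup shape, with an illustrative function name:

static void example_cleanup(struct radeon_bo **bo)
{
	if (*bo == NULL)
		return;
	if (radeon_bo_is_reserved(*bo)) {
		/* reservation still held: the pin path was reached */
		radeon_bo_unpin(*bo);
		radeon_bo_unreserve(*bo);
	}
	radeon_bo_unref(bo);
}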
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index eda4ade24c3a..5a19d529d1c0 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -150,7 +150,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 		man->default_caching = TTM_PL_FLAG_CACHED;
 		break;
 	case TTM_PL_TT:
-		man->gpu_offset = 0;
+		man->gpu_offset = rdev->mc.gtt_location;
 		man->available_caching = TTM_PL_MASK_CACHING;
 		man->default_caching = TTM_PL_FLAG_CACHED;
 		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
@@ -180,7 +180,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 		break;
 	case TTM_PL_VRAM:
 		/* "On-card" video ram */
-		man->gpu_offset = 0;
+		man->gpu_offset = rdev->mc.vram_location;
 		man->flags = TTM_MEMTYPE_FLAG_FIXED |
 			     TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
 			     TTM_MEMTYPE_FLAG_MAPPABLE;
@@ -197,16 +197,19 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 	return 0;
 }
 
-static uint32_t radeon_evict_flags(struct ttm_buffer_object *bo)
+static void radeon_evict_flags(struct ttm_buffer_object *bo,
+				struct ttm_placement *placement)
 {
-	uint32_t cur_placement = bo->mem.placement & ~TTM_PL_MASK_MEMTYPE;
-
+	struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
 	switch (bo->mem.mem_type) {
+	case TTM_PL_VRAM:
+		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
+		break;
+	case TTM_PL_TT:
 	default:
-		return (cur_placement & ~TTM_PL_MASK_CACHING) |
-			TTM_PL_FLAG_SYSTEM |
-			TTM_PL_FLAG_CACHED;
+		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
 	}
+	*placement = rbo->placement;
 }
 
 static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
@@ -283,14 +286,21 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
 	struct radeon_device *rdev;
 	struct ttm_mem_reg *old_mem = &bo->mem;
 	struct ttm_mem_reg tmp_mem;
-	uint32_t proposed_placement;
+	u32 placements;
+	struct ttm_placement placement;
 	int r;
 
 	rdev = radeon_get_rdev(bo->bdev);
 	tmp_mem = *new_mem;
 	tmp_mem.mm_node = NULL;
-	proposed_placement = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
-	r = ttm_bo_mem_space(bo, proposed_placement, &tmp_mem,
+	placement.fpfn = 0;
+	placement.lpfn = 0;
+	placement.num_placement = 1;
+	placement.placement = &placements;
+	placement.num_busy_placement = 1;
+	placement.busy_placement = &placements;
+	placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
			     interruptible, no_wait);
 	if (unlikely(r)) {
 		return r;
@@ -329,15 +339,21 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
 	struct radeon_device *rdev;
 	struct ttm_mem_reg *old_mem = &bo->mem;
 	struct ttm_mem_reg tmp_mem;
-	uint32_t proposed_flags;
+	struct ttm_placement placement;
+	u32 placements;
 	int r;
 
 	rdev = radeon_get_rdev(bo->bdev);
 	tmp_mem = *new_mem;
 	tmp_mem.mm_node = NULL;
-	proposed_flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
-	r = ttm_bo_mem_space(bo, proposed_flags, &tmp_mem,
-			     interruptible, no_wait);
+	placement.fpfn = 0;
+	placement.lpfn = 0;
+	placement.num_placement = 1;
+	placement.placement = &placements;
+	placement.num_busy_placement = 1;
+	placement.busy_placement = &placements;
+	placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait);
 	if (unlikely(r)) {
 		return r;
 	}
@@ -407,18 +423,6 @@ memcpy:
 	return r;
 }
 
-const uint32_t radeon_mem_prios[] = {
-	TTM_PL_VRAM,
-	TTM_PL_TT,
-	TTM_PL_SYSTEM,
-};
-
-const uint32_t radeon_busy_prios[] = {
-	TTM_PL_TT,
-	TTM_PL_VRAM,
-	TTM_PL_SYSTEM,
-};
-
 static int radeon_sync_obj_wait(void *sync_obj, void *sync_arg,
 				bool lazy, bool interruptible)
 {
@@ -446,10 +450,6 @@ static bool radeon_sync_obj_signaled(void *sync_obj, void *sync_arg)
 }
 
 static struct ttm_bo_driver radeon_bo_driver = {
-	.mem_type_prio = radeon_mem_prios,
-	.mem_busy_prio = radeon_busy_prios,
-	.num_mem_type_prio = ARRAY_SIZE(radeon_mem_prios),
-	.num_mem_busy_prio = ARRAY_SIZE(radeon_busy_prios),
 	.create_ttm_backend_entry = &radeon_create_ttm_backend_entry,
 	.invalidate_caches = &radeon_invalidate_caches,
 	.init_mem_type = &radeon_init_mem_type,
@@ -482,27 +482,31 @@ int radeon_ttm_init(struct radeon_device *rdev)
 		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
 		return r;
 	}
-	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM, 0,
-			   ((rdev->mc.real_vram_size) >> PAGE_SHIFT));
+	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM,
+			   rdev->mc.real_vram_size >> PAGE_SHIFT);
 	if (r) {
 		DRM_ERROR("Failed initializing VRAM heap.\n");
 		return r;
 	}
-	r = radeon_object_create(rdev, NULL, 256 * 1024, true,
-				 RADEON_GEM_DOMAIN_VRAM, false,
-				 &rdev->stollen_vga_memory);
+	r = radeon_bo_create(rdev, NULL, 256 * 1024, true,
+			     RADEON_GEM_DOMAIN_VRAM,
+			     &rdev->stollen_vga_memory);
 	if (r) {
 		return r;
 	}
-	r = radeon_object_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
+	r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
+	if (r)
+		return r;
+	r = radeon_bo_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
+	radeon_bo_unreserve(rdev->stollen_vga_memory);
 	if (r) {
-		radeon_object_unref(&rdev->stollen_vga_memory);
+		radeon_bo_unref(&rdev->stollen_vga_memory);
 		return r;
 	}
 	DRM_INFO("radeon: %uM of VRAM memory ready\n",
 		 (unsigned)rdev->mc.real_vram_size / (1024 * 1024));
-	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT, 0,
-			   ((rdev->mc.gtt_size) >> PAGE_SHIFT));
+	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT,
+			   rdev->mc.gtt_size >> PAGE_SHIFT);
 	if (r) {
 		DRM_ERROR("Failed initializing GTT heap.\n");
 		return r;
@@ -523,9 +527,15 @@ int radeon_ttm_init(struct radeon_device *rdev)
 
 void radeon_ttm_fini(struct radeon_device *rdev)
 {
+	int r;
+
 	if (rdev->stollen_vga_memory) {
-		radeon_object_unpin(rdev->stollen_vga_memory);
-		radeon_object_unref(&rdev->stollen_vga_memory);
+		r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
+		if (r == 0) {
+			radeon_bo_unpin(rdev->stollen_vga_memory);
+			radeon_bo_unreserve(rdev->stollen_vga_memory);
+		}
+		radeon_bo_unref(&rdev->stollen_vga_memory);
 	}
 	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM);
 	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT);
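For orientation (not part of the commit): the reworked TTM validation API replaces the old single placement-flags word with a struct ttm_placement that carries an array of candidate placements, a separate busy-case fallback list, and an optional page-range restriction (fpfn/lpfn). A one-entry placement equivalent to the old TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING word, with the fields filled exactly as radeon_move_vram_ram()/radeon_move_ram_vram() do above; the wrapper name is illustrative:

static void example_fill_tt_placement(struct ttm_placement *placement,
				      u32 *placements)
{
	*placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	placement->fpfn = 0;			/* no page-range restriction */
	placement->lpfn = 0;
	placement->num_placement = 1;		/* one preferred placement */
	placement->placement = placements;
	placement->num_busy_placement = 1;	/* same choice under pressure */
	placement->busy_placement = placements;
}

Because placement->placement only stores a pointer, the placements word must stay alive across the ttm_bo_mem_space() call that consumes it, which is why the move helpers above keep both as locals in the same stack frame.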
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index ca037160a582..c1fcdddb6be6 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -352,7 +352,7 @@ static int rs400_mc_init(struct radeon_device *rdev)
 	u32 tmp;
 
 	/* Setup GPU memory space */
-	tmp = G_00015C_MC_FB_START(RREG32(R_00015C_NB_TOM));
+	tmp = RREG32(R_00015C_NB_TOM);
 	rdev->mc.vram_location = G_00015C_MC_FB_START(tmp) << 16;
 	rdev->mc.gtt_location = 0xFFFFFFFFUL;
 	r = radeon_mc_setup(rdev);
@@ -387,13 +387,13 @@ static int rs400_startup(struct radeon_device *rdev)
 	r300_clock_startup(rdev);
 	/* Initialize GPU configuration (# pipes, ...) */
 	rs400_gpu_init(rdev);
+	r100_enable_bm(rdev);
 	/* Initialize GART (initialize after TTM so we can allocate
 	 * memory through TTM but finalize after TTM) */
 	r = rs400_gart_enable(rdev);
 	if (r)
 		return r;
 	/* Enable IRQ */
-	rdev->irq.sw_int = true;
 	r100_irq_set(rdev);
 	/* 1M ring buffer */
 	r = r100_cp_init(rdev, 1024 * 1024);
@@ -430,6 +430,8 @@ int rs400_resume(struct radeon_device *rdev)
 	radeon_combios_asic_init(rdev->ddev);
 	/* Resume clock after posting */
 	r300_clock_startup(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
 	return rs400_startup(rdev);
 }
 
@@ -452,7 +454,7 @@ void rs400_fini(struct radeon_device *rdev)
 	rs400_gart_fini(rdev);
 	radeon_irq_kms_fini(rdev);
 	radeon_fence_driver_fini(rdev);
-	radeon_object_fini(rdev);
+	radeon_bo_fini(rdev);
 	radeon_atombios_fini(rdev);
 	kfree(rdev->bios);
 	rdev->bios = NULL;
@@ -490,10 +492,9 @@ int rs400_init(struct radeon_device *rdev)
 			RREG32(R_0007C0_CP_STAT));
 	}
 	/* check if cards are posted or not */
-	if (!radeon_card_posted(rdev) && rdev->bios) {
-		DRM_INFO("GPU not posted. posting now...\n");
-		radeon_combios_asic_init(rdev->ddev);
-	}
+	if (radeon_boot_test_post_card(rdev) == false)
+		return -EINVAL;
+
 	/* Initialize clocks */
 	radeon_get_clock_info(rdev->ddev);
 	/* Get vram informations */
@@ -510,7 +511,7 @@ int rs400_init(struct radeon_device *rdev)
 	if (r)
 		return r;
 	/* Memory manager */
-	r = radeon_object_init(rdev);
+	r = radeon_bo_init(rdev);
 	if (r)
 		return r;
 	r = rs400_gart_init(rdev);
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 5f117cd8736a..4f8ea4260572 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -45,6 +45,122 @@
 void rs600_gpu_init(struct radeon_device *rdev);
 int rs600_mc_wait_for_idle(struct radeon_device *rdev);
 
+int rs600_mc_init(struct radeon_device *rdev)
+{
+	/* read back the MC value from the hw */
+	int r;
+	u32 tmp;
+
+	/* Setup GPU memory space */
+	tmp = RREG32_MC(R_000004_MC_FB_LOCATION);
+	rdev->mc.vram_location = G_000004_MC_FB_START(tmp) << 16;
+	rdev->mc.gtt_location = 0xffffffffUL;
+	r = radeon_mc_setup(rdev);
+	if (r)
+		return r;
+	return 0;
+}
+
+/* hpd for digital panel detect/disconnect */
+bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
+{
+	u32 tmp;
+	bool connected = false;
+
+	switch (hpd) {
+	case RADEON_HPD_1:
+		tmp = RREG32(R_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS);
+		if (G_007D04_DC_HOT_PLUG_DETECT1_SENSE(tmp))
+			connected = true;
+		break;
+	case RADEON_HPD_2:
+		tmp = RREG32(R_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS);
+		if (G_007D14_DC_HOT_PLUG_DETECT2_SENSE(tmp))
+			connected = true;
+		break;
+	default:
+		break;
+	}
+	return connected;
+}
+
+void rs600_hpd_set_polarity(struct radeon_device *rdev,
+			    enum radeon_hpd_id hpd)
+{
+	u32 tmp;
+	bool connected = rs600_hpd_sense(rdev, hpd);
+
+	switch (hpd) {
+	case RADEON_HPD_1:
+		tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
+		if (connected)
+			tmp &= ~S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(1);
+		else
+			tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(1);
+		WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
+		break;
+	case RADEON_HPD_2:
+		tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL);
+		if (connected)
+			tmp &= ~S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(1);
+		else
+			tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(1);
+		WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
+		break;
+	default:
+		break;
+	}
+}
+
+void rs600_hpd_init(struct radeon_device *rdev)
+{
+	struct drm_device *dev = rdev->ddev;
+	struct drm_connector *connector;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		switch (radeon_connector->hpd.hpd) {
+		case RADEON_HPD_1:
+			WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
+			       S_007D00_DC_HOT_PLUG_DETECT1_EN(1));
+			rdev->irq.hpd[0] = true;
+			break;
+		case RADEON_HPD_2:
+			WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
+			       S_007D10_DC_HOT_PLUG_DETECT2_EN(1));
+			rdev->irq.hpd[1] = true;
+			break;
+		default:
+			break;
+		}
+	}
+	rs600_irq_set(rdev);
+}
+
+void rs600_hpd_fini(struct radeon_device *rdev)
+{
+	struct drm_device *dev = rdev->ddev;
+	struct drm_connector *connector;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		switch (radeon_connector->hpd.hpd) {
+		case RADEON_HPD_1:
+			WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
+			       S_007D00_DC_HOT_PLUG_DETECT1_EN(0));
+			rdev->irq.hpd[0] = false;
+			break;
+		case RADEON_HPD_2:
+			WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
+			       S_007D10_DC_HOT_PLUG_DETECT2_EN(0));
+			rdev->irq.hpd[1] = false;
+			break;
+		default:
+			break;
+		}
+	}
+}
+
 /*
  * GART.
  */
@@ -100,40 +216,40 @@ int rs600_gart_enable(struct radeon_device *rdev)
 	WREG32(R_00004C_BUS_CNTL, tmp);
 	/* FIXME: setup default page */
 	WREG32_MC(R_000100_MC_PT0_CNTL,
 		  (S_000100_EFFECTIVE_L2_CACHE_SIZE(6) |
 		   S_000100_EFFECTIVE_L2_QUEUE_SIZE(6)));
+
 	for (i = 0; i < 19; i++) {
 		WREG32_MC(R_00016C_MC_PT0_CLIENT0_CNTL + i,
 			  S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(1) |
 			  S_00016C_SYSTEM_ACCESS_MODE_MASK(
-				V_00016C_SYSTEM_ACCESS_MODE_IN_SYS) |
+				V_00016C_SYSTEM_ACCESS_MODE_NOT_IN_SYS) |
 			  S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(
-				V_00016C_SYSTEM_APERTURE_UNMAPPED_DEFAULT_PAGE) |
-			  S_00016C_EFFECTIVE_L1_CACHE_SIZE(1) |
+				V_00016C_SYSTEM_APERTURE_UNMAPPED_PASSTHROUGH) |
+			  S_00016C_EFFECTIVE_L1_CACHE_SIZE(3) |
 			  S_00016C_ENABLE_FRAGMENT_PROCESSING(1) |
-			  S_00016C_EFFECTIVE_L1_QUEUE_SIZE(1));
+			  S_00016C_EFFECTIVE_L1_QUEUE_SIZE(3));
 	}
-
-	/* System context map to GART space */
-	WREG32_MC(R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.gtt_start);
-	WREG32_MC(R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.gtt_end);
-
 	/* enable first context */
-	WREG32_MC(R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_start);
-	WREG32_MC(R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR, rdev->mc.gtt_end);
 	WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL,
 		  S_000102_ENABLE_PAGE_TABLE(1) |
 		  S_000102_PAGE_TABLE_DEPTH(V_000102_PAGE_TABLE_FLAT));
+
 	/* disable all other contexts */
-	for (i = 1; i < 8; i++) {
+	for (i = 1; i < 8; i++)
 		WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL + i, 0);
-	}
 
 	/* setup the page table */
 	WREG32_MC(R_00012C_MC_PT0_CONTEXT0_FLAT_BASE_ADDR,
 		  rdev->gart.table_addr);
+	WREG32_MC(R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_start);
+	WREG32_MC(R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR, rdev->mc.gtt_end);
 	WREG32_MC(R_00011C_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0);
 
+	/* System context maps to VRAM space */
+	WREG32_MC(R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start);
+	WREG32_MC(R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end);
+
 	/* enable page tables */
 	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
 	WREG32_MC(R_000100_MC_PT0_CNTL, (tmp | S_000100_ENABLE_PT(1)));
@@ -146,15 +262,20 @@ int rs600_gart_enable(struct radeon_device *rdev)
 
 void rs600_gart_disable(struct radeon_device *rdev)
 {
-	uint32_t tmp;
+	u32 tmp;
+	int r;
 
 	/* FIXME: disable out of gart access */
 	WREG32_MC(R_000100_MC_PT0_CNTL, 0);
 	tmp = RREG32_MC(R_000009_MC_CNTL1);
 	WREG32_MC(R_000009_MC_CNTL1, tmp & C_000009_ENABLE_PAGE_TABLES);
 	if (rdev->gart.table.vram.robj) {
-		radeon_object_kunmap(rdev->gart.table.vram.robj);
-		radeon_object_unpin(rdev->gart.table.vram.robj);
+		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+		if (r == 0) {
+			radeon_bo_kunmap(rdev->gart.table.vram.robj);
+			radeon_bo_unpin(rdev->gart.table.vram.robj);
+			radeon_bo_unreserve(rdev->gart.table.vram.robj);
+		}
 	}
 }
 
@@ -189,6 +310,10 @@ int rs600_irq_set(struct radeon_device *rdev)
 {
 	uint32_t tmp = 0;
 	uint32_t mode_int = 0;
+	u32 hpd1 = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL) &
+		~S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1);
+	u32 hpd2 = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL) &
+		~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
 
 	if (rdev->irq.sw_int) {
 		tmp |= S_000040_SW_INT_EN(1);
@@ -199,8 +324,16 @@ int rs600_irq_set(struct radeon_device *rdev)
 	if (rdev->irq.crtc_vblank_int[1]) {
 		mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1);
 	}
+	if (rdev->irq.hpd[0]) {
+		hpd1 |= S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1);
+	}
+	if (rdev->irq.hpd[1]) {
+		hpd2 |= S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
+	}
 	WREG32(R_000040_GEN_INT_CNTL, tmp);
 	WREG32(R_006540_DxMODE_INT_MASK, mode_int);
+	WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
+	WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
 	return 0;
 }
 
@@ -208,6 +341,7 @@ static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_
 {
 	uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS);
 	uint32_t irq_mask = ~C_000044_SW_INT;
+	u32 tmp;
 
 	if (G_000044_DISPLAY_INT_STAT(irqs)) {
 		*r500_disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
@@ -219,6 +353,16 @@ static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_
 			WREG32(R_006D34_D2MODE_VBLANK_STATUS,
 			       S_006D34_D2MODE_VBLANK_ACK(1));
 		}
+		if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(*r500_disp_int)) {
+			tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
+			tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(1);
+			WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
+		}
+		if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(*r500_disp_int)) {
+			tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL);
+			tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(1);
+			WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
+		}
 	} else {
 		*r500_disp_int = 0;
 	}
@@ -244,6 +388,7 @@ int rs600_irq_process(struct radeon_device *rdev)
 {
 	uint32_t status, msi_rearm;
 	uint32_t r500_disp_int;
+	bool queue_hotplug = false;
 
 	status = rs600_irq_ack(rdev, &r500_disp_int);
 	if (!status && !r500_disp_int) {
@@ -258,8 +403,18 @@ int rs600_irq_process(struct radeon_device *rdev)
 			drm_handle_vblank(rdev->ddev, 0);
 		if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int))
 			drm_handle_vblank(rdev->ddev, 1);
+		if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(r500_disp_int)) {
+			queue_hotplug = true;
+			DRM_DEBUG("HPD1\n");
+		}
+		if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(r500_disp_int)) {
+			queue_hotplug = true;
+			DRM_DEBUG("HPD2\n");
+		}
 		status = rs600_irq_ack(rdev, &r500_disp_int);
 	}
+	if (queue_hotplug)
+		queue_work(rdev->wq, &rdev->hotplug_work);
 	if (rdev->msi_enabled) {
 		switch (rdev->family) {
 		case CHIP_RS600:
@@ -301,9 +456,7 @@ int rs600_mc_wait_for_idle(struct radeon_device *rdev)
 
 void rs600_gpu_init(struct radeon_device *rdev)
 {
-	/* FIXME: HDP same place on rs600 ? */
 	r100_hdp_reset(rdev);
-	/* FIXME: is this correct ? */
 	r420_pipes_init(rdev);
 	/* Wait for mc idle */
 	if (rs600_mc_wait_for_idle(rdev))
@@ -312,9 +465,20 @@ void rs600_gpu_init(struct radeon_device *rdev)
 
 void rs600_vram_info(struct radeon_device *rdev)
 {
-	/* FIXME: to do or is these values sane ? */
 	rdev->mc.vram_is_ddr = true;
 	rdev->mc.vram_width = 128;
+
+	rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
+	rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
+
+	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
+	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+
+	if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
+		rdev->mc.mc_vram_size = rdev->mc.aper_size;
+
+	if (rdev->mc.real_vram_size > rdev->mc.aper_size)
+		rdev->mc.real_vram_size = rdev->mc.aper_size;
 }
 
 void rs600_bandwidth_update(struct radeon_device *rdev)
@@ -388,7 +552,6 @@ static int rs600_startup(struct radeon_device *rdev)
 	if (r)
 		return r;
 	/* Enable IRQ */
-	rdev->irq.sw_int = true;
 	rs600_irq_set(rdev);
 	/* 1M ring buffer */
 	r = r100_cp_init(rdev, 1024 * 1024);
@@ -423,6 +586,8 @@ int rs600_resume(struct radeon_device *rdev)
 	atom_asic_init(rdev->mode_info.atom_context);
 	/* Resume clock after posting */
 	rv515_clock_startup(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
 	return rs600_startup(rdev);
 }
 
@@ -445,7 +610,7 @@ void rs600_fini(struct radeon_device *rdev)
 	rs600_gart_fini(rdev);
 	radeon_irq_kms_fini(rdev);
 	radeon_fence_driver_fini(rdev);
-	radeon_object_fini(rdev);
+	radeon_bo_fini(rdev);
 	radeon_atombios_fini(rdev);
 	kfree(rdev->bios);
 	rdev->bios = NULL;
@@ -482,10 +647,9 @@ int rs600_init(struct radeon_device *rdev)
 			RREG32(R_0007C0_CP_STAT));
 	}
 	/* check if cards are posted or not */
-	if (!radeon_card_posted(rdev) && rdev->bios) {
-		DRM_INFO("GPU not posted. posting now...\n");
-		atom_asic_init(rdev->mode_info.atom_context);
-	}
+	if (radeon_boot_test_post_card(rdev) == false)
+		return -EINVAL;
+
 	/* Initialize clocks */
 	radeon_get_clock_info(rdev->ddev);
 	/* Initialize power management */
@@ -493,7 +657,7 @@ int rs600_init(struct radeon_device *rdev)
 	/* Get vram informations */
 	rs600_vram_info(rdev);
 	/* Initialize memory controller (also test AGP) */
-	r = r420_mc_init(rdev);
+	r = rs600_mc_init(rdev);
 	if (r)
 		return r;
 	rs600_debugfs(rdev);
@@ -505,7 +669,7 @@ int rs600_init(struct radeon_device *rdev)
 	if (r)
 		return r;
 	/* Memory manager */
-	r = radeon_object_init(rdev);
+	r = radeon_bo_init(rdev);
 	if (r)
 		return r;
 	r = rs600_gart_init(rdev);
diff --git a/drivers/gpu/drm/radeon/rs600d.h b/drivers/gpu/drm/radeon/rs600d.h
index 81308924859a..c1c8f5885cbb 100644
--- a/drivers/gpu/drm/radeon/rs600d.h
+++ b/drivers/gpu/drm/radeon/rs600d.h
@@ -30,27 +30,12 @@
 
 /* Registers */
 #define R_000040_GEN_INT_CNTL 0x000040
-#define S_000040_DISPLAY_INT_STATUS(x) (((x) & 0x1) << 0)
-#define G_000040_DISPLAY_INT_STATUS(x) (((x) >> 0) & 0x1)
-#define C_000040_DISPLAY_INT_STATUS 0xFFFFFFFE
-#define S_000040_DMA_VIPH0_INT_EN(x) (((x) & 0x1) << 12)
-#define G_000040_DMA_VIPH0_INT_EN(x) (((x) >> 12) & 0x1)
-#define C_000040_DMA_VIPH0_INT_EN 0xFFFFEFFF
-#define S_000040_CRTC2_VSYNC(x) (((x) & 0x1) << 6)
-#define G_000040_CRTC2_VSYNC(x) (((x) >> 6) & 0x1)
-#define C_000040_CRTC2_VSYNC 0xFFFFFFBF
-#define S_000040_SNAPSHOT2(x) (((x) & 0x1) << 7)
-#define G_000040_SNAPSHOT2(x) (((x) >> 7) & 0x1)
-#define C_000040_SNAPSHOT2 0xFFFFFF7F
-#define S_000040_CRTC2_VBLANK(x) (((x) & 0x1) << 9)
-#define G_000040_CRTC2_VBLANK(x) (((x) >> 9) & 0x1)
-#define C_000040_CRTC2_VBLANK 0xFFFFFDFF
-#define S_000040_FP2_DETECT(x) (((x) & 0x1) << 10)
-#define G_000040_FP2_DETECT(x) (((x) >> 10) & 0x1)
-#define C_000040_FP2_DETECT 0xFFFFFBFF
-#define S_000040_VSYNC_DIFF_OVER_LIMIT(x) (((x) & 0x1) << 11)
-#define G_000040_VSYNC_DIFF_OVER_LIMIT(x) (((x) >> 11) & 0x1)
-#define C_000040_VSYNC_DIFF_OVER_LIMIT 0xFFFFF7FF
+#define S_000040_SCRATCH_INT_MASK(x) (((x) & 0x1) << 18)
+#define G_000040_SCRATCH_INT_MASK(x) (((x) >> 18) & 0x1)
+#define C_000040_SCRATCH_INT_MASK 0xFFFBFFFF
+#define S_000040_GUI_IDLE_MASK(x) (((x) & 0x1) << 19)
+#define G_000040_GUI_IDLE_MASK(x) (((x) >> 19) & 0x1)
+#define C_000040_GUI_IDLE_MASK 0xFFF7FFFF
 #define S_000040_DMA_VIPH1_INT_EN(x) (((x) & 0x1) << 13)
 #define G_000040_DMA_VIPH1_INT_EN(x) (((x) >> 13) & 0x1)
 #define C_000040_DMA_VIPH1_INT_EN 0xFFFFDFFF
@@ -370,7 +355,90 @@
 #define S_007EDC_LB_D2_VBLANK_INTERRUPT(x) (((x) & 0x1) << 5)
 #define G_007EDC_LB_D2_VBLANK_INTERRUPT(x) (((x) >> 5) & 0x1)
 #define C_007EDC_LB_D2_VBLANK_INTERRUPT 0xFFFFFFDF
-
+#define S_007EDC_DACA_AUTODETECT_INTERRUPT(x) (((x) & 0x1) << 16)
+#define G_007EDC_DACA_AUTODETECT_INTERRUPT(x) (((x) >> 16) & 0x1)
+#define C_007EDC_DACA_AUTODETECT_INTERRUPT 0xFFFEFFFF
+#define S_007EDC_DACB_AUTODETECT_INTERRUPT(x) (((x) & 0x1) << 17)
+#define G_007EDC_DACB_AUTODETECT_INTERRUPT(x) (((x) >> 17) & 0x1)
+#define C_007EDC_DACB_AUTODETECT_INTERRUPT 0xFFFDFFFF
+#define S_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(x) (((x) & 0x1) << 18)
+#define G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(x) (((x) >> 18) & 0x1)
+#define C_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT 0xFFFBFFFF
+#define S_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(x) (((x) & 0x1) << 19)
+#define G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(x) (((x) >> 19) & 0x1)
+#define C_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT 0xFFF7FFFF
+#define R_007828_DACA_AUTODETECT_CONTROL 0x007828
+#define S_007828_DACA_AUTODETECT_MODE(x) (((x) & 0x3) << 0)
+#define G_007828_DACA_AUTODETECT_MODE(x) (((x) >> 0) & 0x3)
+#define C_007828_DACA_AUTODETECT_MODE 0xFFFFFFFC
+#define S_007828_DACA_AUTODETECT_FRAME_TIME_COUNTER(x) (((x) & 0xff) << 8)
+#define G_007828_DACA_AUTODETECT_FRAME_TIME_COUNTER(x) (((x) >> 8) & 0xff)
+#define C_007828_DACA_AUTODETECT_FRAME_TIME_COUNTER 0xFFFF00FF
+#define S_007828_DACA_AUTODETECT_CHECK_MASK(x) (((x) & 0x3) << 16)
+#define G_007828_DACA_AUTODETECT_CHECK_MASK(x) (((x) >> 16) & 0x3)
+#define C_007828_DACA_AUTODETECT_CHECK_MASK 0xFFFCFFFF
+#define R_007838_DACA_AUTODETECT_INT_CONTROL 0x007838
+#define S_007838_DACA_AUTODETECT_ACK(x) (((x) & 0x1) << 0)
+#define C_007838_DACA_DACA_AUTODETECT_ACK 0xFFFFFFFE
+#define S_007838_DACA_AUTODETECT_INT_ENABLE(x) (((x) & 0x1) << 16)
+#define G_007838_DACA_AUTODETECT_INT_ENABLE(x) (((x) >> 16) & 0x1)
+#define C_007838_DACA_AUTODETECT_INT_ENABLE 0xFFFCFFFF
+#define R_007A28_DACB_AUTODETECT_CONTROL 0x007A28
+#define S_007A28_DACB_AUTODETECT_MODE(x) (((x) & 0x3) << 0)
+#define G_007A28_DACB_AUTODETECT_MODE(x) (((x) >> 0) & 0x3)
+#define C_007A28_DACB_AUTODETECT_MODE 0xFFFFFFFC
+#define S_007A28_DACB_AUTODETECT_FRAME_TIME_COUNTER(x) (((x) & 0xff) << 8)
+#define G_007A28_DACB_AUTODETECT_FRAME_TIME_COUNTER(x) (((x) >> 8) & 0xff)
+#define C_007A28_DACB_AUTODETECT_FRAME_TIME_COUNTER 0xFFFF00FF
+#define S_007A28_DACB_AUTODETECT_CHECK_MASK(x) (((x) & 0x3) << 16)
+#define G_007A28_DACB_AUTODETECT_CHECK_MASK(x) (((x) >> 16) & 0x3)
+#define C_007A28_DACB_AUTODETECT_CHECK_MASK 0xFFFCFFFF
+#define R_007A38_DACB_AUTODETECT_INT_CONTROL 0x007A38
+#define S_007A38_DACB_AUTODETECT_ACK(x) (((x) & 0x1) << 0)
+#define C_007A38_DACB_DACA_AUTODETECT_ACK 0xFFFFFFFE
+#define S_007A38_DACB_AUTODETECT_INT_ENABLE(x) (((x) & 0x1) << 16)
+#define G_007A38_DACB_AUTODETECT_INT_ENABLE(x) (((x) >> 16) & 0x1)
+#define C_007A38_DACB_AUTODETECT_INT_ENABLE 0xFFFCFFFF
+#define R_007D00_DC_HOT_PLUG_DETECT1_CONTROL 0x007D00
+#define S_007D00_DC_HOT_PLUG_DETECT1_EN(x) (((x) & 0x1) << 0)
+#define G_007D00_DC_HOT_PLUG_DETECT1_EN(x) (((x) >> 0) & 0x1)
+#define C_007D00_DC_HOT_PLUG_DETECT1_EN 0xFFFFFFFE
+#define R_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS 0x007D04
+#define S_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS(x) (((x) & 0x1) << 0)
+#define G_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS(x) (((x) >> 0) & 0x1)
+#define C_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS 0xFFFFFFFE
+#define S_007D04_DC_HOT_PLUG_DETECT1_SENSE(x) (((x) & 0x1) << 1)
+#define G_007D04_DC_HOT_PLUG_DETECT1_SENSE(x) (((x) >> 1) & 0x1)
+#define C_007D04_DC_HOT_PLUG_DETECT1_SENSE 0xFFFFFFFD
+#define R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL 0x007D08
+#define S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(x) (((x) & 0x1) << 0)
+#define C_007D08_DC_HOT_PLUG_DETECT1_INT_ACK 0xFFFFFFFE
+#define S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(x) (((x) & 0x1) << 8)
+#define G_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(x) (((x) >> 8) & 0x1)
+#define C_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY 0xFFFFFEFF
+#define S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(x) (((x) & 0x1) << 16)
+#define G_007D08_DC_HOT_PLUG_DETECT1_INT_EN(x) (((x) >> 16) & 0x1)
+#define C_007D08_DC_HOT_PLUG_DETECT1_INT_EN 0xFFFEFFFF
+#define R_007D10_DC_HOT_PLUG_DETECT2_CONTROL 0x007D10
+#define S_007D10_DC_HOT_PLUG_DETECT2_EN(x) (((x) & 0x1) << 0)
+#define G_007D10_DC_HOT_PLUG_DETECT2_EN(x) (((x) >> 0) & 0x1)
+#define C_007D10_DC_HOT_PLUG_DETECT2_EN 0xFFFFFFFE
+#define R_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS 0x007D14
+#define S_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS(x) (((x) & 0x1) << 0)
+#define G_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS(x) (((x) >> 0) & 0x1)
+#define C_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS 0xFFFFFFFE
+#define S_007D14_DC_HOT_PLUG_DETECT2_SENSE(x) (((x) & 0x1) << 1)
+#define G_007D14_DC_HOT_PLUG_DETECT2_SENSE(x) (((x) >> 1) & 0x1)
+#define C_007D14_DC_HOT_PLUG_DETECT2_SENSE 0xFFFFFFFD
+#define R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL 0x007D18
+#define S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(x) (((x) & 0x1) << 0)
+#define C_007D18_DC_HOT_PLUG_DETECT2_INT_ACK 0xFFFFFFFE
+#define S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(x) (((x) & 0x1) << 8)
+#define G_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(x) (((x) >> 8) & 0x1)
+#define C_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY 0xFFFFFEFF
+#define S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(x) (((x) & 0x1) << 16)
+#define G_007D18_DC_HOT_PLUG_DETECT2_INT_EN(x) (((x) >> 16) & 0x1)
+#define C_007D18_DC_HOT_PLUG_DETECT2_INT_EN 0xFFFEFFFF
 
 /* MC registers */
 #define R_000000_MC_STATUS 0x000000
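For orientation (not part of the commit): rs600d.h follows the radeon convention of generating three macros per register field, S_ shifts a value into the field, G_ extracts it from a register word, and C_ is the AND-mask that clears it. A sketch of a typical read-modify-write using the HPD polarity field defined above; the function name is illustrative:

static void example_flip_hpd1_polarity(struct radeon_device *rdev)
{
	u32 tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);

	if (G_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(tmp))	/* read field */
		tmp &= C_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY; /* clear it */
	else
		tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(1); /* set it */
	WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
}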
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 27547175cf93..1e22f52d6039 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -131,24 +131,25 @@ void rs690_pm_info(struct radeon_device *rdev)
131 131
132void rs690_vram_info(struct radeon_device *rdev) 132void rs690_vram_info(struct radeon_device *rdev)
133{ 133{
134 uint32_t tmp;
135 fixed20_12 a; 134 fixed20_12 a;
136 135
137 rs400_gart_adjust_size(rdev); 136 rs400_gart_adjust_size(rdev);
138 /* DDR for all card after R300 & IGP */ 137
139 rdev->mc.vram_is_ddr = true; 138 rdev->mc.vram_is_ddr = true;
140 /* FIXME: is this correct for RS690/RS740 ? */ 139 rdev->mc.vram_width = 128;
141 tmp = RREG32(RADEON_MEM_CNTL); 140
142 if (tmp & R300_MEM_NUM_CHANNELS_MASK) {
143 rdev->mc.vram_width = 128;
144 } else {
145 rdev->mc.vram_width = 64;
146 }
147 rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); 141 rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
148 rdev->mc.mc_vram_size = rdev->mc.real_vram_size; 142 rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
149 143
150 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); 144 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
151 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); 145 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
146
147 if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
148 rdev->mc.mc_vram_size = rdev->mc.aper_size;
149
150 if (rdev->mc.real_vram_size > rdev->mc.aper_size)
151 rdev->mc.real_vram_size = rdev->mc.aper_size;
152
152 rs690_pm_info(rdev); 153 rs690_pm_info(rdev);
153 /* FIXME: we should enforce default clock in case GPU is not in 154 /* FIXME: we should enforce default clock in case GPU is not in
154 * default setup 155 * default setup
@@ -161,6 +162,21 @@ void rs690_vram_info(struct radeon_device *rdev)
161 rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a); 162 rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a);
162} 163}
163 164
165static int rs690_mc_init(struct radeon_device *rdev)
166{
167 int r;
168 u32 tmp;
169
170 /* Setup GPU memory space */
171 tmp = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
172 rdev->mc.vram_location = G_000100_MC_FB_START(tmp) << 16;
173 rdev->mc.gtt_location = 0xFFFFFFFFUL;
174 r = radeon_mc_setup(rdev);
175 if (r)
176 return r;
177 return 0;
178}
179
164void rs690_line_buffer_adjust(struct radeon_device *rdev, 180void rs690_line_buffer_adjust(struct radeon_device *rdev,
165 struct drm_display_mode *mode1, 181 struct drm_display_mode *mode1,
166 struct drm_display_mode *mode2) 182 struct drm_display_mode *mode2)
@@ -244,8 +260,9 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
244 260
245 b.full = rfixed_const(mode->crtc_hdisplay); 261 b.full = rfixed_const(mode->crtc_hdisplay);
246 c.full = rfixed_const(256); 262 c.full = rfixed_const(256);
247 a.full = rfixed_mul(wm->num_line_pair, b); 263 a.full = rfixed_div(b, c);
248 request_fifo_depth.full = rfixed_div(a, c); 264 request_fifo_depth.full = rfixed_mul(a, wm->num_line_pair);
265 request_fifo_depth.full = rfixed_ceil(request_fifo_depth);
249 if (a.full < rfixed_const(4)) { 266 if (a.full < rfixed_const(4)) {
250 wm->lb_request_fifo_depth = 4; 267 wm->lb_request_fifo_depth = 4;
251 } else { 268 } else {
@@ -374,6 +391,7 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
374 a.full = rfixed_const(16); 391 a.full = rfixed_const(16);
375 wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay); 392 wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
376 wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a); 393 wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a);
394 wm->priority_mark_max.full = rfixed_ceil(wm->priority_mark_max);
377 395
378 /* Determine estimated width */ 396 /* Determine estimated width */
379 estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full; 397 estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
@@ -383,6 +401,7 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
383 } else { 401 } else {
384 a.full = rfixed_const(16); 402 a.full = rfixed_const(16);
385 wm->priority_mark.full = rfixed_div(estimated_width, a); 403 wm->priority_mark.full = rfixed_div(estimated_width, a);
404 wm->priority_mark.full = rfixed_ceil(wm->priority_mark);
386 wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full; 405 wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
387 } 406 }
388} 407}
@@ -605,7 +624,6 @@ static int rs690_startup(struct radeon_device *rdev)
605 if (r) 624 if (r)
606 return r; 625 return r;
607 /* Enable IRQ */ 626 /* Enable IRQ */
608 rdev->irq.sw_int = true;
609 rs600_irq_set(rdev); 627 rs600_irq_set(rdev);
610 /* 1M ring buffer */ 628 /* 1M ring buffer */
611 r = r100_cp_init(rdev, 1024 * 1024); 629 r = r100_cp_init(rdev, 1024 * 1024);
@@ -640,6 +658,8 @@ int rs690_resume(struct radeon_device *rdev)
640 atom_asic_init(rdev->mode_info.atom_context); 658 atom_asic_init(rdev->mode_info.atom_context);
641 /* Resume clock after posting */ 659 /* Resume clock after posting */
642 rv515_clock_startup(rdev); 660 rv515_clock_startup(rdev);
661 /* Initialize surface registers */
662 radeon_surface_init(rdev);
643 return rs690_startup(rdev); 663 return rs690_startup(rdev);
644} 664}
645 665
@@ -662,7 +682,7 @@ void rs690_fini(struct radeon_device *rdev)
662 rs400_gart_fini(rdev); 682 rs400_gart_fini(rdev);
663 radeon_irq_kms_fini(rdev); 683 radeon_irq_kms_fini(rdev);
664 radeon_fence_driver_fini(rdev); 684 radeon_fence_driver_fini(rdev);
665 radeon_object_fini(rdev); 685 radeon_bo_fini(rdev);
666 radeon_atombios_fini(rdev); 686 radeon_atombios_fini(rdev);
667 kfree(rdev->bios); 687 kfree(rdev->bios);
668 rdev->bios = NULL; 688 rdev->bios = NULL;
@@ -700,10 +720,9 @@ int rs690_init(struct radeon_device *rdev)
700 RREG32(R_0007C0_CP_STAT)); 720 RREG32(R_0007C0_CP_STAT));
701 } 721 }
702 /* check if cards are posted or not */ 722 /* check if cards are posted or not */
703 if (!radeon_card_posted(rdev) && rdev->bios) { 723 if (radeon_boot_test_post_card(rdev) == false)
704 DRM_INFO("GPU not posted. posting now...\n"); 724 return -EINVAL;
705 atom_asic_init(rdev->mode_info.atom_context); 725
706 }
707 /* Initialize clocks */ 726 /* Initialize clocks */
708 radeon_get_clock_info(rdev->ddev); 727 radeon_get_clock_info(rdev->ddev);
709 /* Initialize power management */ 728 /* Initialize power management */
@@ -711,7 +730,7 @@ int rs690_init(struct radeon_device *rdev)
711 /* Get vram informations */ 730 /* Get vram informations */
712 rs690_vram_info(rdev); 731 rs690_vram_info(rdev);
713 /* Initialize memory controller (also test AGP) */ 732 /* Initialize memory controller (also test AGP) */
714 r = r420_mc_init(rdev); 733 r = rs690_mc_init(rdev);
715 if (r) 734 if (r)
716 return r; 735 return r;
717 rv515_debugfs(rdev); 736 rv515_debugfs(rdev);
@@ -723,7 +742,7 @@ int rs690_init(struct radeon_device *rdev)
723 if (r) 742 if (r)
724 return r; 743 return r;
725 /* Memory manager */ 744 /* Memory manager */
726 r = radeon_object_init(rdev); 745 r = radeon_bo_init(rdev);
727 if (r) 746 if (r)
728 return r; 747 return r;
729 r = rs400_gart_init(rdev); 748 r = rs400_gart_init(rdev);
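[Editor's note] The rs690 bandwidth hunks above reorder the fixed-point math: instead of multiplying num_line_pair by crtc_hdisplay and then dividing by 256, the new code divides first, multiplies second, and rounds the result up with rfixed_ceil(), so a fractional request-FIFO depth is no longer silently truncated when converted to an integer. A minimal userspace model of that ordering — the 20.12 layout and helper definitions here are assumptions for illustration, not taken from radeon_fixed.h:

#include <stdint.h>
#include <stdio.h>

#define FIX_SHIFT 12u                 /* assumed 20.12 fixed-point layout */
#define FIX_ONE   (1u << FIX_SHIFT)

static uint32_t fix_const(uint32_t a) { return a << FIX_SHIFT; }

static uint32_t fix_div(uint32_t a, uint32_t b)
{
	return (uint32_t)(((uint64_t)a << FIX_SHIFT) / b);
}

static uint32_t fix_mul(uint32_t a, uint32_t b)
{
	return (uint32_t)(((uint64_t)a * b) >> FIX_SHIFT);
}

static uint32_t fix_ceil(uint32_t a)
{
	return (a & (FIX_ONE - 1)) ? (a & ~(FIX_ONE - 1)) + FIX_ONE : a;
}

int main(void)
{
	uint32_t hdisplay = fix_const(1366);   /* hypothetical mode width */
	uint32_t line_pairs = fix_const(1);

	/* new evaluation order: divide, multiply, round up */
	uint32_t depth = fix_ceil(fix_mul(fix_div(hdisplay, fix_const(256)),
					  line_pairs));
	printf("request fifo depth = %u\n", depth >> FIX_SHIFT);  /* prints 6 */
	return 0;
}

With hdisplay = 1366, 1366/256 = 5.34 in fixed point; rounding up yields a depth of 6 where plain truncation would give 5.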
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index ba68c9fe90a1..59632a506b46 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -478,7 +478,6 @@ static int rv515_startup(struct radeon_device *rdev)
478 return r; 478 return r;
479 } 479 }
480 /* Enable IRQ */ 480 /* Enable IRQ */
481 rdev->irq.sw_int = true;
482 rs600_irq_set(rdev); 481 rs600_irq_set(rdev);
483 /* 1M ring buffer */ 482 /* 1M ring buffer */
484 r = r100_cp_init(rdev, 1024 * 1024); 483 r = r100_cp_init(rdev, 1024 * 1024);
@@ -514,6 +513,8 @@ int rv515_resume(struct radeon_device *rdev)
514 atom_asic_init(rdev->mode_info.atom_context); 513 atom_asic_init(rdev->mode_info.atom_context);
515 /* Resume clock after posting */ 514 /* Resume clock after posting */
516 rv515_clock_startup(rdev); 515 rv515_clock_startup(rdev);
516 /* Initialize surface registers */
517 radeon_surface_init(rdev);
517 return rv515_startup(rdev); 518 return rv515_startup(rdev);
518} 519}
519 520
@@ -540,11 +541,11 @@ void rv515_fini(struct radeon_device *rdev)
540 r100_wb_fini(rdev); 541 r100_wb_fini(rdev);
541 r100_ib_fini(rdev); 542 r100_ib_fini(rdev);
542 radeon_gem_fini(rdev); 543 radeon_gem_fini(rdev);
543 rv370_pcie_gart_fini(rdev); 544 rv370_pcie_gart_fini(rdev);
544 radeon_agp_fini(rdev); 545 radeon_agp_fini(rdev);
545 radeon_irq_kms_fini(rdev); 546 radeon_irq_kms_fini(rdev);
546 radeon_fence_driver_fini(rdev); 547 radeon_fence_driver_fini(rdev);
547 radeon_object_fini(rdev); 548 radeon_bo_fini(rdev);
548 radeon_atombios_fini(rdev); 549 radeon_atombios_fini(rdev);
549 kfree(rdev->bios); 550 kfree(rdev->bios);
550 rdev->bios = NULL; 551 rdev->bios = NULL;
@@ -580,10 +581,8 @@ int rv515_init(struct radeon_device *rdev)
580 RREG32(R_0007C0_CP_STAT)); 581 RREG32(R_0007C0_CP_STAT));
581 } 582 }
582 /* check if cards are posted or not */ 583 /* check if cards are posted or not */
583 if (!radeon_card_posted(rdev) && rdev->bios) { 584 if (radeon_boot_test_post_card(rdev) == false)
584 DRM_INFO("GPU not posted. posting now...\n"); 585 return -EINVAL;
585 atom_asic_init(rdev->mode_info.atom_context);
586 }
587 /* Initialize clocks */ 586 /* Initialize clocks */
588 radeon_get_clock_info(rdev->ddev); 587 radeon_get_clock_info(rdev->ddev);
589 /* Initialize power management */ 588 /* Initialize power management */
@@ -603,7 +602,7 @@ int rv515_init(struct radeon_device *rdev)
603 if (r) 602 if (r)
604 return r; 603 return r;
605 /* Memory manager */ 604 /* Memory manager */
606 r = radeon_object_init(rdev); 605 r = radeon_bo_init(rdev);
607 if (r) 606 if (r)
608 return r; 607 return r;
609 r = rv370_pcie_gart_init(rdev); 608 r = rv370_pcie_gart_init(rdev);
@@ -892,8 +891,9 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
892 891
893 b.full = rfixed_const(mode->crtc_hdisplay); 892 b.full = rfixed_const(mode->crtc_hdisplay);
894 c.full = rfixed_const(256); 893 c.full = rfixed_const(256);
895 a.full = rfixed_mul(wm->num_line_pair, b); 894 a.full = rfixed_div(b, c);
896 request_fifo_depth.full = rfixed_div(a, c); 895 request_fifo_depth.full = rfixed_mul(a, wm->num_line_pair);
896 request_fifo_depth.full = rfixed_ceil(request_fifo_depth);
897 if (a.full < rfixed_const(4)) { 897 if (a.full < rfixed_const(4)) {
898 wm->lb_request_fifo_depth = 4; 898 wm->lb_request_fifo_depth = 4;
899 } else { 899 } else {
@@ -995,15 +995,17 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
995 a.full = rfixed_const(16); 995 a.full = rfixed_const(16);
996 wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay); 996 wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
997 wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a); 997 wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a);
998 wm->priority_mark_max.full = rfixed_ceil(wm->priority_mark_max);
998 999
999 /* Determine estimated width */ 1000 /* Determine estimated width */
1000 estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full; 1001 estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
1001 estimated_width.full = rfixed_div(estimated_width, consumption_time); 1002 estimated_width.full = rfixed_div(estimated_width, consumption_time);
1002 if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) { 1003 if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
1003 wm->priority_mark.full = rfixed_const(10); 1004 wm->priority_mark.full = wm->priority_mark_max.full;
1004 } else { 1005 } else {
1005 a.full = rfixed_const(16); 1006 a.full = rfixed_const(16);
1006 wm->priority_mark.full = rfixed_div(estimated_width, a); 1007 wm->priority_mark.full = rfixed_div(estimated_width, a);
1008 wm->priority_mark.full = rfixed_ceil(wm->priority_mark);
1007 wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full; 1009 wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
1008 } 1010 }
1009} 1011}
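[Editor's note] Both rs690_init() and rv515_init() above replace the open-coded "is the card posted?" check with radeon_boot_test_post_card(). The helper itself is defined elsewhere in the series; pieced together from the inlined logic it replaces here and the expanded error path rv770_init() grows below, it plausibly looks like the following sketch — a reconstruction, not the actual radeon_device.c code:

bool radeon_boot_test_post_card(struct radeon_device *rdev)
{
	if (radeon_card_posted(rdev))
		return true;		/* BIOS already posted the card */

	if (rdev->bios) {
		DRM_INFO("GPU not posted. posting now...\n");
		/* AtomBIOS path only; any combios handling is omitted here */
		atom_asic_init(rdev->mode_info.atom_context);
		return true;
	}

	dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
	return false;
}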
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 5e06ee7076f5..fbb0357f1ec3 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -92,7 +92,7 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev)
92void rv770_pcie_gart_disable(struct radeon_device *rdev) 92void rv770_pcie_gart_disable(struct radeon_device *rdev)
93{ 93{
94 u32 tmp; 94 u32 tmp;
95 int i; 95 int i, r;
96 96
97 /* Disable all tables */ 97 /* Disable all tables */
98 for (i = 0; i < 7; i++) 98 for (i = 0; i < 7; i++)
@@ -113,8 +113,12 @@ void rv770_pcie_gart_disable(struct radeon_device *rdev)
113 WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); 113 WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
114 WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp); 114 WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
115 if (rdev->gart.table.vram.robj) { 115 if (rdev->gart.table.vram.robj) {
116 radeon_object_kunmap(rdev->gart.table.vram.robj); 116 r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
117 radeon_object_unpin(rdev->gart.table.vram.robj); 117 if (likely(r == 0)) {
118 radeon_bo_kunmap(rdev->gart.table.vram.robj);
119 radeon_bo_unpin(rdev->gart.table.vram.robj);
120 radeon_bo_unreserve(rdev->gart.table.vram.robj);
121 }
118 } 122 }
119} 123}
120 124
@@ -870,6 +874,14 @@ static int rv770_startup(struct radeon_device *rdev)
870{ 874{
871 int r; 875 int r;
872 876
877 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
878 r = r600_init_microcode(rdev);
879 if (r) {
880 DRM_ERROR("Failed to load firmware!\n");
881 return r;
882 }
883 }
884
873 rv770_mc_program(rdev); 885 rv770_mc_program(rdev);
874 if (rdev->flags & RADEON_IS_AGP) { 886 if (rdev->flags & RADEON_IS_AGP) {
875 rv770_agp_enable(rdev); 887 rv770_agp_enable(rdev);
@@ -880,13 +892,26 @@ static int rv770_startup(struct radeon_device *rdev)
880 } 892 }
881 rv770_gpu_init(rdev); 893 rv770_gpu_init(rdev);
882 894
883 r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, 895 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
884 &rdev->r600_blit.shader_gpu_addr); 896 if (unlikely(r != 0))
897 return r;
898 r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
899 &rdev->r600_blit.shader_gpu_addr);
900 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
885 if (r) { 901 if (r) {
886 DRM_ERROR("failed to pin blit object %d\n", r); 902 DRM_ERROR("failed to pin blit object %d\n", r);
887 return r; 903 return r;
888 } 904 }
889 905
906 /* Enable IRQ */
907 r = r600_irq_init(rdev);
908 if (r) {
909 DRM_ERROR("radeon: IH init failed (%d).\n", r);
910 radeon_irq_kms_fini(rdev);
911 return r;
912 }
913 r600_irq_set(rdev);
914
890 r = radeon_ring_init(rdev, rdev->cp.ring_size); 915 r = radeon_ring_init(rdev, rdev->cp.ring_size);
891 if (r) 916 if (r)
892 return r; 917 return r;
@@ -934,13 +959,19 @@ int rv770_resume(struct radeon_device *rdev)
934 959
935int rv770_suspend(struct radeon_device *rdev) 960int rv770_suspend(struct radeon_device *rdev)
936{ 961{
962 int r;
963
937 /* FIXME: we should wait for ring to be empty */ 964 /* FIXME: we should wait for ring to be empty */
938 r700_cp_stop(rdev); 965 r700_cp_stop(rdev);
939 rdev->cp.ready = false; 966 rdev->cp.ready = false;
940 r600_wb_disable(rdev); 967 r600_wb_disable(rdev);
941 rv770_pcie_gart_disable(rdev); 968 rv770_pcie_gart_disable(rdev);
942 /* unpin shaders bo */ 969 /* unpin shaders bo */
943 radeon_object_unpin(rdev->r600_blit.shader_obj); 970 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
971 if (likely(r == 0)) {
972 radeon_bo_unpin(rdev->r600_blit.shader_obj);
973 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
974 }
944 return 0; 975 return 0;
945} 976}
946 977
@@ -975,7 +1006,11 @@ int rv770_init(struct radeon_device *rdev)
975 if (r) 1006 if (r)
976 return r; 1007 return r;
977 /* Post card if necessary */ 1008 /* Post card if necessary */
978 if (!r600_card_posted(rdev) && rdev->bios) { 1009 if (!r600_card_posted(rdev)) {
1010 if (!rdev->bios) {
1011 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
1012 return -EINVAL;
1013 }
979 DRM_INFO("GPU not posted. posting now...\n"); 1014 DRM_INFO("GPU not posted. posting now...\n");
980 atom_asic_init(rdev->mode_info.atom_context); 1015 atom_asic_init(rdev->mode_info.atom_context);
981 } 1016 }
@@ -998,31 +1033,31 @@ int rv770_init(struct radeon_device *rdev)
998 if (r) 1033 if (r)
999 return r; 1034 return r;
1000 /* Memory manager */ 1035 /* Memory manager */
1001 r = radeon_object_init(rdev); 1036 r = radeon_bo_init(rdev);
1037 if (r)
1038 return r;
1039
1040 r = radeon_irq_kms_init(rdev);
1002 if (r) 1041 if (r)
1003 return r; 1042 return r;
1043
1004 rdev->cp.ring_obj = NULL; 1044 rdev->cp.ring_obj = NULL;
1005 r600_ring_init(rdev, 1024 * 1024); 1045 r600_ring_init(rdev, 1024 * 1024);
1006 1046
1007 if (!rdev->me_fw || !rdev->pfp_fw) { 1047 rdev->ih.ring_obj = NULL;
1008 r = r600_cp_init_microcode(rdev); 1048 r600_ih_ring_init(rdev, 64 * 1024);
1009 if (r) {
1010 DRM_ERROR("Failed to load firmware!\n");
1011 return r;
1012 }
1013 }
1014 1049
1015 r = r600_pcie_gart_init(rdev); 1050 r = r600_pcie_gart_init(rdev);
1016 if (r) 1051 if (r)
1017 return r; 1052 return r;
1018 1053
1019 rdev->accel_working = true;
1020 r = r600_blit_init(rdev); 1054 r = r600_blit_init(rdev);
1021 if (r) { 1055 if (r) {
1022 DRM_ERROR("radeon: failled blitter (%d).\n", r); 1056 DRM_ERROR("radeon: failed blitter (%d).\n", r);
1023 rdev->accel_working = false; 1057 return r;
1024 } 1058 }
1025 1059
1060 rdev->accel_working = true;
1026 r = rv770_startup(rdev); 1061 r = rv770_startup(rdev);
1027 if (r) { 1062 if (r) {
1028 rv770_suspend(rdev); 1063 rv770_suspend(rdev);
@@ -1034,12 +1069,12 @@ int rv770_init(struct radeon_device *rdev)
1034 if (rdev->accel_working) { 1069 if (rdev->accel_working) {
1035 r = radeon_ib_pool_init(rdev); 1070 r = radeon_ib_pool_init(rdev);
1036 if (r) { 1071 if (r) {
1037 DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r); 1072 DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
1038 rdev->accel_working = false; 1073 rdev->accel_working = false;
1039 } 1074 }
1040 r = r600_ib_test(rdev); 1075 r = r600_ib_test(rdev);
1041 if (r) { 1076 if (r) {
1042 DRM_ERROR("radeon: failled testing IB (%d).\n", r); 1077 DRM_ERROR("radeon: failed testing IB (%d).\n", r);
1043 rdev->accel_working = false; 1078 rdev->accel_working = false;
1044 } 1079 }
1045 } 1080 }
@@ -1051,6 +1086,8 @@ void rv770_fini(struct radeon_device *rdev)
1051 rv770_suspend(rdev); 1086 rv770_suspend(rdev);
1052 1087
1053 r600_blit_fini(rdev); 1088 r600_blit_fini(rdev);
1089 r600_irq_fini(rdev);
1090 radeon_irq_kms_fini(rdev);
1054 radeon_ring_fini(rdev); 1091 radeon_ring_fini(rdev);
1055 r600_wb_fini(rdev); 1092 r600_wb_fini(rdev);
1056 rv770_pcie_gart_fini(rdev); 1093 rv770_pcie_gart_fini(rdev);
@@ -1059,7 +1096,7 @@ void rv770_fini(struct radeon_device *rdev)
1059 radeon_clocks_fini(rdev); 1096 radeon_clocks_fini(rdev);
1060 if (rdev->flags & RADEON_IS_AGP) 1097 if (rdev->flags & RADEON_IS_AGP)
1061 radeon_agp_fini(rdev); 1098 radeon_agp_fini(rdev);
1062 radeon_object_fini(rdev); 1099 radeon_bo_fini(rdev);
1063 radeon_atombios_fini(rdev); 1100 radeon_atombios_fini(rdev);
1064 kfree(rdev->bios); 1101 kfree(rdev->bios);
1065 rdev->bios = NULL; 1102 rdev->bios = NULL;
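[Editor's note] A recurring shape in the rv770 hunks (and in the rs690/rv515 ones above) is that every radeon_bo_pin()/radeon_bo_unpin()/radeon_bo_kunmap() call is now bracketed by a reserve/unreserve pair. Condensed into one hedged helper — the struct and parameter names are assumptions, only the radeon_bo_* calls come from the diff:

static int radeon_pin_locked(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
	int r;

	r = radeon_bo_reserve(bo, false);	/* take the bo reservation */
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(bo, domain, gpu_addr);
	radeon_bo_unreserve(bo);	/* drop it whether or not the pin worked */
	return r;
}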
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index b0a9de7a57c2..1e138f5bae09 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -3,6 +3,7 @@
3 3
4ccflags-y := -Iinclude/drm 4ccflags-y := -Iinclude/drm
5ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \ 5ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
6 ttm_bo_util.o ttm_bo_vm.o ttm_module.o ttm_global.o 6 ttm_bo_util.o ttm_bo_vm.o ttm_module.o ttm_global.o \
7 ttm_object.o ttm_lock.o ttm_execbuf_util.o
7 8
8obj-$(CONFIG_DRM_TTM) += ttm.o 9obj-$(CONFIG_DRM_TTM) += ttm.o
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 87c06252d464..a835b6fe42a1 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -27,6 +27,14 @@
27/* 27/*
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> 28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 */ 29 */
30/* Notes:
31 *
 32 * We store the bo pointer in the drm_mm_node struct so we know which bo owns a
33 * specific node. There is no protection on the pointer, thus to make
34 * sure things don't go berserk you have to access this pointer while
35 * holding the global lru lock and make sure anytime you free a node you
36 * reset the pointer to NULL.
37 */
30 38
31#include "ttm/ttm_module.h" 39#include "ttm/ttm_module.h"
32#include "ttm/ttm_bo_driver.h" 40#include "ttm/ttm_bo_driver.h"
@@ -51,6 +59,60 @@ static struct attribute ttm_bo_count = {
51 .mode = S_IRUGO 59 .mode = S_IRUGO
52}; 60};
53 61
62static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
63{
64 int i;
65
66 for (i = 0; i <= TTM_PL_PRIV5; i++)
67 if (flags & (1 << i)) {
68 *mem_type = i;
69 return 0;
70 }
71 return -EINVAL;
72}
73
74static void ttm_mem_type_manager_debug(struct ttm_bo_global *glob,
75 struct ttm_mem_type_manager *man)
76{
77 printk(KERN_ERR TTM_PFX " has_type: %d\n", man->has_type);
78 printk(KERN_ERR TTM_PFX " use_type: %d\n", man->use_type);
79 printk(KERN_ERR TTM_PFX " flags: 0x%08X\n", man->flags);
80 printk(KERN_ERR TTM_PFX " gpu_offset: 0x%08lX\n", man->gpu_offset);
81 printk(KERN_ERR TTM_PFX " io_offset: 0x%08lX\n", man->io_offset);
82 printk(KERN_ERR TTM_PFX " io_size: %ld\n", man->io_size);
83 printk(KERN_ERR TTM_PFX " size: %ld\n", (unsigned long)man->size);
84 printk(KERN_ERR TTM_PFX " available_caching: 0x%08X\n",
85 man->available_caching);
86 printk(KERN_ERR TTM_PFX " default_caching: 0x%08X\n",
87 man->default_caching);
88 spin_lock(&glob->lru_lock);
89 drm_mm_debug_table(&man->manager, TTM_PFX);
90 spin_unlock(&glob->lru_lock);
91}
92
93static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
94 struct ttm_placement *placement)
95{
96 struct ttm_bo_device *bdev = bo->bdev;
97 struct ttm_bo_global *glob = bo->glob;
98 struct ttm_mem_type_manager *man;
99 int i, ret, mem_type;
100
101 printk(KERN_ERR TTM_PFX "No space for %p (%ld pages, %ldK, %ldM)\n",
102 bo, bo->mem.num_pages, bo->mem.size >> 10,
103 bo->mem.size >> 20);
104 for (i = 0; i < placement->num_placement; i++) {
105 ret = ttm_mem_type_from_flags(placement->placement[i],
106 &mem_type);
107 if (ret)
108 return;
109 man = &bdev->man[mem_type];
110 printk(KERN_ERR TTM_PFX " placement[%d]=0x%08X (%d)\n",
111 i, placement->placement[i], mem_type);
112 ttm_mem_type_manager_debug(glob, man);
113 }
114}
115
54static ssize_t ttm_bo_global_show(struct kobject *kobj, 116static ssize_t ttm_bo_global_show(struct kobject *kobj,
55 struct attribute *attr, 117 struct attribute *attr,
56 char *buffer) 118 char *buffer)
@@ -117,7 +179,7 @@ int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
117 ret = wait_event_interruptible(bo->event_queue, 179 ret = wait_event_interruptible(bo->event_queue,
118 atomic_read(&bo->reserved) == 0); 180 atomic_read(&bo->reserved) == 0);
119 if (unlikely(ret != 0)) 181 if (unlikely(ret != 0))
120 return -ERESTART; 182 return ret;
121 } else { 183 } else {
122 wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0); 184 wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
123 } 185 }
@@ -247,7 +309,6 @@ EXPORT_SYMBOL(ttm_bo_unreserve);
247/* 309/*
248 * Call bo->mutex locked. 310 * Call bo->mutex locked.
249 */ 311 */
250
251static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc) 312static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
252{ 313{
253 struct ttm_bo_device *bdev = bo->bdev; 314 struct ttm_bo_device *bdev = bo->bdev;
@@ -275,9 +336,10 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
275 bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT, 336 bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
276 page_flags | TTM_PAGE_FLAG_USER, 337 page_flags | TTM_PAGE_FLAG_USER,
277 glob->dummy_read_page); 338 glob->dummy_read_page);
278 if (unlikely(bo->ttm == NULL)) 339 if (unlikely(bo->ttm == NULL)) {
279 ret = -ENOMEM; 340 ret = -ENOMEM;
280 break; 341 break;
342 }
281 343
282 ret = ttm_tt_set_user(bo->ttm, current, 344 ret = ttm_tt_set_user(bo->ttm, current,
283 bo->buffer_start, bo->num_pages); 345 bo->buffer_start, bo->num_pages);
@@ -328,14 +390,8 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
328 } 390 }
329 391
330 if (bo->mem.mem_type == TTM_PL_SYSTEM) { 392 if (bo->mem.mem_type == TTM_PL_SYSTEM) {
331 393 bo->mem = *mem;
332 struct ttm_mem_reg *old_mem = &bo->mem;
333 uint32_t save_flags = old_mem->placement;
334
335 *old_mem = *mem;
336 mem->mm_node = NULL; 394 mem->mm_node = NULL;
337 ttm_flag_masked(&save_flags, mem->placement,
338 TTM_PL_MASK_MEMTYPE);
339 goto moved; 395 goto moved;
340 } 396 }
341 397
@@ -418,6 +474,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
418 kref_put(&bo->list_kref, ttm_bo_ref_bug); 474 kref_put(&bo->list_kref, ttm_bo_ref_bug);
419 } 475 }
420 if (bo->mem.mm_node) { 476 if (bo->mem.mm_node) {
477 bo->mem.mm_node->private = NULL;
421 drm_mm_put_block(bo->mem.mm_node); 478 drm_mm_put_block(bo->mem.mm_node);
422 bo->mem.mm_node = NULL; 479 bo->mem.mm_node = NULL;
423 } 480 }
@@ -554,24 +611,21 @@ void ttm_bo_unref(struct ttm_buffer_object **p_bo)
554} 611}
555EXPORT_SYMBOL(ttm_bo_unref); 612EXPORT_SYMBOL(ttm_bo_unref);
556 613
557static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type, 614static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
558 bool interruptible, bool no_wait) 615 bool no_wait)
559{ 616{
560 int ret = 0;
561 struct ttm_bo_device *bdev = bo->bdev; 617 struct ttm_bo_device *bdev = bo->bdev;
562 struct ttm_bo_global *glob = bo->glob; 618 struct ttm_bo_global *glob = bo->glob;
563 struct ttm_mem_reg evict_mem; 619 struct ttm_mem_reg evict_mem;
564 uint32_t proposed_placement; 620 struct ttm_placement placement;
565 621 int ret = 0;
566 if (bo->mem.mem_type != mem_type)
567 goto out;
568 622
569 spin_lock(&bo->lock); 623 spin_lock(&bo->lock);
570 ret = ttm_bo_wait(bo, false, interruptible, no_wait); 624 ret = ttm_bo_wait(bo, false, interruptible, no_wait);
571 spin_unlock(&bo->lock); 625 spin_unlock(&bo->lock);
572 626
573 if (unlikely(ret != 0)) { 627 if (unlikely(ret != 0)) {
574 if (ret != -ERESTART) { 628 if (ret != -ERESTARTSYS) {
575 printk(KERN_ERR TTM_PFX 629 printk(KERN_ERR TTM_PFX
576 "Failed to expire sync object before " 630 "Failed to expire sync object before "
577 "buffer eviction.\n"); 631 "buffer eviction.\n");
@@ -584,116 +638,139 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
584 evict_mem = bo->mem; 638 evict_mem = bo->mem;
585 evict_mem.mm_node = NULL; 639 evict_mem.mm_node = NULL;
586 640
587 proposed_placement = bdev->driver->evict_flags(bo); 641 placement.fpfn = 0;
588 642 placement.lpfn = 0;
589 ret = ttm_bo_mem_space(bo, proposed_placement, 643 placement.num_placement = 0;
590 &evict_mem, interruptible, no_wait); 644 placement.num_busy_placement = 0;
591 if (unlikely(ret != 0 && ret != -ERESTART)) 645 bdev->driver->evict_flags(bo, &placement);
592 ret = ttm_bo_mem_space(bo, TTM_PL_FLAG_SYSTEM, 646 ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
593 &evict_mem, interruptible, no_wait); 647 no_wait);
594
595 if (ret) { 648 if (ret) {
596 if (ret != -ERESTART) 649 if (ret != -ERESTARTSYS) {
597 printk(KERN_ERR TTM_PFX 650 printk(KERN_ERR TTM_PFX
598 "Failed to find memory space for " 651 "Failed to find memory space for "
599 "buffer 0x%p eviction.\n", bo); 652 "buffer 0x%p eviction.\n", bo);
653 ttm_bo_mem_space_debug(bo, &placement);
654 }
600 goto out; 655 goto out;
601 } 656 }
602 657
603 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible, 658 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
604 no_wait); 659 no_wait);
605 if (ret) { 660 if (ret) {
606 if (ret != -ERESTART) 661 if (ret != -ERESTARTSYS)
607 printk(KERN_ERR TTM_PFX "Buffer eviction failed\n"); 662 printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
663 spin_lock(&glob->lru_lock);
664 if (evict_mem.mm_node) {
665 evict_mem.mm_node->private = NULL;
666 drm_mm_put_block(evict_mem.mm_node);
667 evict_mem.mm_node = NULL;
668 }
669 spin_unlock(&glob->lru_lock);
608 goto out; 670 goto out;
609 } 671 }
672 bo->evicted = true;
673out:
674 return ret;
675}
676
677static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
678 uint32_t mem_type,
679 bool interruptible, bool no_wait)
680{
681 struct ttm_bo_global *glob = bdev->glob;
682 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
683 struct ttm_buffer_object *bo;
684 int ret, put_count = 0;
610 685
611 spin_lock(&glob->lru_lock); 686 spin_lock(&glob->lru_lock);
612 if (evict_mem.mm_node) { 687 bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
613 drm_mm_put_block(evict_mem.mm_node); 688 kref_get(&bo->list_kref);
614 evict_mem.mm_node = NULL; 689 ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, false, 0);
615 } 690 if (likely(ret == 0))
691 put_count = ttm_bo_del_from_lru(bo);
616 spin_unlock(&glob->lru_lock); 692 spin_unlock(&glob->lru_lock);
617 bo->evicted = true; 693 if (unlikely(ret != 0))
618out: 694 return ret;
695 while (put_count--)
696 kref_put(&bo->list_kref, ttm_bo_ref_bug);
697 ret = ttm_bo_evict(bo, interruptible, no_wait);
698 ttm_bo_unreserve(bo);
699 kref_put(&bo->list_kref, ttm_bo_release_list);
619 return ret; 700 return ret;
620} 701}
621 702
703static int ttm_bo_man_get_node(struct ttm_buffer_object *bo,
704 struct ttm_mem_type_manager *man,
705 struct ttm_placement *placement,
706 struct ttm_mem_reg *mem,
707 struct drm_mm_node **node)
708{
709 struct ttm_bo_global *glob = bo->glob;
710 unsigned long lpfn;
711 int ret;
712
713 lpfn = placement->lpfn;
714 if (!lpfn)
715 lpfn = man->size;
716 *node = NULL;
717 do {
718 ret = drm_mm_pre_get(&man->manager);
719 if (unlikely(ret))
720 return ret;
721
722 spin_lock(&glob->lru_lock);
723 *node = drm_mm_search_free_in_range(&man->manager,
724 mem->num_pages, mem->page_alignment,
725 placement->fpfn, lpfn, 1);
726 if (unlikely(*node == NULL)) {
727 spin_unlock(&glob->lru_lock);
728 return 0;
729 }
730 *node = drm_mm_get_block_atomic_range(*node, mem->num_pages,
731 mem->page_alignment,
732 placement->fpfn,
733 lpfn);
734 spin_unlock(&glob->lru_lock);
735 } while (*node == NULL);
736 return 0;
737}
738
622/** 739/**
623 * Repeatedly evict memory from the LRU for @mem_type until we create enough 740 * Repeatedly evict memory from the LRU for @mem_type until we create enough
624 * space, or we've evicted everything and there isn't enough space. 741 * space, or we've evicted everything and there isn't enough space.
625 */ 742 */
626static int ttm_bo_mem_force_space(struct ttm_bo_device *bdev, 743static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
627 struct ttm_mem_reg *mem, 744 uint32_t mem_type,
628 uint32_t mem_type, 745 struct ttm_placement *placement,
629 bool interruptible, bool no_wait) 746 struct ttm_mem_reg *mem,
747 bool interruptible, bool no_wait)
630{ 748{
749 struct ttm_bo_device *bdev = bo->bdev;
631 struct ttm_bo_global *glob = bdev->glob; 750 struct ttm_bo_global *glob = bdev->glob;
632 struct drm_mm_node *node;
633 struct ttm_buffer_object *entry;
634 struct ttm_mem_type_manager *man = &bdev->man[mem_type]; 751 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
635 struct list_head *lru; 752 struct drm_mm_node *node;
636 unsigned long num_pages = mem->num_pages;
637 int put_count = 0;
638 int ret; 753 int ret;
639 754
640retry_pre_get:
641 ret = drm_mm_pre_get(&man->manager);
642 if (unlikely(ret != 0))
643 return ret;
644
645 spin_lock(&glob->lru_lock);
646 do { 755 do {
647 node = drm_mm_search_free(&man->manager, num_pages, 756 ret = ttm_bo_man_get_node(bo, man, placement, mem, &node);
648 mem->page_alignment, 1); 757 if (unlikely(ret != 0))
758 return ret;
649 if (node) 759 if (node)
650 break; 760 break;
651 761 spin_lock(&glob->lru_lock);
652 lru = &man->lru; 762 if (list_empty(&man->lru)) {
653 if (list_empty(lru)) 763 spin_unlock(&glob->lru_lock);
654 break; 764 break;
655 765 }
656 entry = list_first_entry(lru, struct ttm_buffer_object, lru);
657 kref_get(&entry->list_kref);
658
659 ret =
660 ttm_bo_reserve_locked(entry, interruptible, no_wait,
661 false, 0);
662
663 if (likely(ret == 0))
664 put_count = ttm_bo_del_from_lru(entry);
665
666 spin_unlock(&glob->lru_lock); 766 spin_unlock(&glob->lru_lock);
667 767 ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
768 no_wait);
668 if (unlikely(ret != 0)) 769 if (unlikely(ret != 0))
669 return ret; 770 return ret;
670
671 while (put_count--)
672 kref_put(&entry->list_kref, ttm_bo_ref_bug);
673
674 ret = ttm_bo_evict(entry, mem_type, interruptible, no_wait);
675
676 ttm_bo_unreserve(entry);
677
678 kref_put(&entry->list_kref, ttm_bo_release_list);
679 if (ret)
680 return ret;
681
682 spin_lock(&glob->lru_lock);
683 } while (1); 771 } while (1);
684 772 if (node == NULL)
685 if (!node) {
686 spin_unlock(&glob->lru_lock);
687 return -ENOMEM; 773 return -ENOMEM;
688 }
689
690 node = drm_mm_get_block_atomic(node, num_pages, mem->page_alignment);
691 if (unlikely(!node)) {
692 spin_unlock(&glob->lru_lock);
693 goto retry_pre_get;
694 }
695
696 spin_unlock(&glob->lru_lock);
697 mem->mm_node = node; 774 mem->mm_node = node;
698 mem->mem_type = mem_type; 775 mem->mem_type = mem_type;
699 return 0; 776 return 0;
@@ -724,7 +801,6 @@ static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
724 return result; 801 return result;
725} 802}
726 803
727
728static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man, 804static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
729 bool disallow_fixed, 805 bool disallow_fixed,
730 uint32_t mem_type, 806 uint32_t mem_type,
@@ -757,66 +833,55 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
757 * space. 833 * space.
758 */ 834 */
759int ttm_bo_mem_space(struct ttm_buffer_object *bo, 835int ttm_bo_mem_space(struct ttm_buffer_object *bo,
760 uint32_t proposed_placement, 836 struct ttm_placement *placement,
761 struct ttm_mem_reg *mem, 837 struct ttm_mem_reg *mem,
762 bool interruptible, bool no_wait) 838 bool interruptible, bool no_wait)
763{ 839{
764 struct ttm_bo_device *bdev = bo->bdev; 840 struct ttm_bo_device *bdev = bo->bdev;
765 struct ttm_bo_global *glob = bo->glob;
766 struct ttm_mem_type_manager *man; 841 struct ttm_mem_type_manager *man;
767
768 uint32_t num_prios = bdev->driver->num_mem_type_prio;
769 const uint32_t *prios = bdev->driver->mem_type_prio;
770 uint32_t i;
771 uint32_t mem_type = TTM_PL_SYSTEM; 842 uint32_t mem_type = TTM_PL_SYSTEM;
772 uint32_t cur_flags = 0; 843 uint32_t cur_flags = 0;
773 bool type_found = false; 844 bool type_found = false;
774 bool type_ok = false; 845 bool type_ok = false;
775 bool has_eagain = false; 846 bool has_erestartsys = false;
776 struct drm_mm_node *node = NULL; 847 struct drm_mm_node *node = NULL;
777 int ret; 848 int i, ret;
778 849
779 mem->mm_node = NULL; 850 mem->mm_node = NULL;
 780 for (i = 0; i < num_prios; ++i) { 851 for (i = 0; i < placement->num_placement; ++i) {
781 mem_type = prios[i]; 852 ret = ttm_mem_type_from_flags(placement->placement[i],
853 &mem_type);
854 if (ret)
855 return ret;
782 man = &bdev->man[mem_type]; 856 man = &bdev->man[mem_type];
783 857
784 type_ok = ttm_bo_mt_compatible(man, 858 type_ok = ttm_bo_mt_compatible(man,
785 bo->type == ttm_bo_type_user, 859 bo->type == ttm_bo_type_user,
786 mem_type, proposed_placement, 860 mem_type,
787 &cur_flags); 861 placement->placement[i],
862 &cur_flags);
788 863
789 if (!type_ok) 864 if (!type_ok)
790 continue; 865 continue;
791 866
792 cur_flags = ttm_bo_select_caching(man, bo->mem.placement, 867 cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
793 cur_flags); 868 cur_flags);
869 /*
870 * Use the access and other non-mapping-related flag bits from
871 * the memory placement flags to the current flags
872 */
873 ttm_flag_masked(&cur_flags, placement->placement[i],
874 ~TTM_PL_MASK_MEMTYPE);
794 875
795 if (mem_type == TTM_PL_SYSTEM) 876 if (mem_type == TTM_PL_SYSTEM)
796 break; 877 break;
797 878
798 if (man->has_type && man->use_type) { 879 if (man->has_type && man->use_type) {
799 type_found = true; 880 type_found = true;
800 do { 881 ret = ttm_bo_man_get_node(bo, man, placement, mem,
801 ret = drm_mm_pre_get(&man->manager); 882 &node);
802 if (unlikely(ret)) 883 if (unlikely(ret))
803 return ret; 884 return ret;
804
805 spin_lock(&glob->lru_lock);
806 node = drm_mm_search_free(&man->manager,
807 mem->num_pages,
808 mem->page_alignment,
809 1);
810 if (unlikely(!node)) {
811 spin_unlock(&glob->lru_lock);
812 break;
813 }
814 node = drm_mm_get_block_atomic(node,
815 mem->num_pages,
816 mem->
817 page_alignment);
818 spin_unlock(&glob->lru_lock);
819 } while (!node);
820 } 885 }
821 if (node) 886 if (node)
822 break; 887 break;
@@ -826,67 +891,65 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
826 mem->mm_node = node; 891 mem->mm_node = node;
827 mem->mem_type = mem_type; 892 mem->mem_type = mem_type;
828 mem->placement = cur_flags; 893 mem->placement = cur_flags;
894 if (node)
895 node->private = bo;
829 return 0; 896 return 0;
830 } 897 }
831 898
832 if (!type_found) 899 if (!type_found)
833 return -EINVAL; 900 return -EINVAL;
834 901
 835 num_prios = bdev->driver->num_mem_busy_prio; 902 for (i = 0; i < placement->num_busy_placement; ++i) {
 836 prios = bdev->driver->mem_busy_prio; 903 ret = ttm_mem_type_from_flags(placement->busy_placement[i],
837 904 &mem_type);
838 for (i = 0; i < num_prios; ++i) { 905 if (ret)
839 mem_type = prios[i]; 906 return ret;
840 man = &bdev->man[mem_type]; 907 man = &bdev->man[mem_type];
841
842 if (!man->has_type) 908 if (!man->has_type)
843 continue; 909 continue;
844
845 if (!ttm_bo_mt_compatible(man, 910 if (!ttm_bo_mt_compatible(man,
846 bo->type == ttm_bo_type_user, 911 bo->type == ttm_bo_type_user,
847 mem_type, 912 mem_type,
 848 proposed_placement, &cur_flags)) 913 placement->busy_placement[i],
914 &cur_flags))
849 continue; 915 continue;
850 916
851 cur_flags = ttm_bo_select_caching(man, bo->mem.placement, 917 cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
852 cur_flags); 918 cur_flags);
919 /*
920 * Use the access and other non-mapping-related flag bits from
921 * the memory placement flags to the current flags
922 */
 923 ttm_flag_masked(&cur_flags, placement->busy_placement[i],
924 ~TTM_PL_MASK_MEMTYPE);
853 925
854 ret = ttm_bo_mem_force_space(bdev, mem, mem_type, 926 ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
855 interruptible, no_wait); 927 interruptible, no_wait);
856
857 if (ret == 0 && mem->mm_node) { 928 if (ret == 0 && mem->mm_node) {
858 mem->placement = cur_flags; 929 mem->placement = cur_flags;
930 mem->mm_node->private = bo;
859 return 0; 931 return 0;
860 } 932 }
861 933 if (ret == -ERESTARTSYS)
862 if (ret == -ERESTART) 934 has_erestartsys = true;
863 has_eagain = true;
864 } 935 }
865 936 ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
866 ret = (has_eagain) ? -ERESTART : -ENOMEM;
867 return ret; 937 return ret;
868} 938}
869EXPORT_SYMBOL(ttm_bo_mem_space); 939EXPORT_SYMBOL(ttm_bo_mem_space);
870 940
871int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait) 941int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
872{ 942{
873 int ret = 0;
874
875 if ((atomic_read(&bo->cpu_writers) > 0) && no_wait) 943 if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
876 return -EBUSY; 944 return -EBUSY;
877 945
878 ret = wait_event_interruptible(bo->event_queue, 946 return wait_event_interruptible(bo->event_queue,
879 atomic_read(&bo->cpu_writers) == 0); 947 atomic_read(&bo->cpu_writers) == 0);
880
881 if (ret == -ERESTARTSYS)
882 ret = -ERESTART;
883
884 return ret;
885} 948}
886 949
887int ttm_bo_move_buffer(struct ttm_buffer_object *bo, 950int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
888 uint32_t proposed_placement, 951 struct ttm_placement *placement,
889 bool interruptible, bool no_wait) 952 bool interruptible, bool no_wait)
890{ 953{
891 struct ttm_bo_global *glob = bo->glob; 954 struct ttm_bo_global *glob = bo->glob;
892 int ret = 0; 955 int ret = 0;
@@ -899,101 +962,82 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
899 * Have the driver move function wait for idle when necessary, 962 * Have the driver move function wait for idle when necessary,
900 * instead of doing it here. 963 * instead of doing it here.
901 */ 964 */
902
903 spin_lock(&bo->lock); 965 spin_lock(&bo->lock);
904 ret = ttm_bo_wait(bo, false, interruptible, no_wait); 966 ret = ttm_bo_wait(bo, false, interruptible, no_wait);
905 spin_unlock(&bo->lock); 967 spin_unlock(&bo->lock);
906
907 if (ret) 968 if (ret)
908 return ret; 969 return ret;
909
910 mem.num_pages = bo->num_pages; 970 mem.num_pages = bo->num_pages;
911 mem.size = mem.num_pages << PAGE_SHIFT; 971 mem.size = mem.num_pages << PAGE_SHIFT;
912 mem.page_alignment = bo->mem.page_alignment; 972 mem.page_alignment = bo->mem.page_alignment;
913
914 /* 973 /*
915 * Determine where to move the buffer. 974 * Determine where to move the buffer.
916 */ 975 */
917 976 ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait);
918 ret = ttm_bo_mem_space(bo, proposed_placement, &mem,
919 interruptible, no_wait);
920 if (ret) 977 if (ret)
921 goto out_unlock; 978 goto out_unlock;
922
923 ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait); 979 ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);
924
925out_unlock: 980out_unlock:
926 if (ret && mem.mm_node) { 981 if (ret && mem.mm_node) {
927 spin_lock(&glob->lru_lock); 982 spin_lock(&glob->lru_lock);
983 mem.mm_node->private = NULL;
928 drm_mm_put_block(mem.mm_node); 984 drm_mm_put_block(mem.mm_node);
929 spin_unlock(&glob->lru_lock); 985 spin_unlock(&glob->lru_lock);
930 } 986 }
931 return ret; 987 return ret;
932} 988}
933 989
934static int ttm_bo_mem_compat(uint32_t proposed_placement, 990static int ttm_bo_mem_compat(struct ttm_placement *placement,
935 struct ttm_mem_reg *mem) 991 struct ttm_mem_reg *mem)
936{ 992{
937 if ((proposed_placement & mem->placement & TTM_PL_MASK_MEM) == 0) 993 int i;
938 return 0; 994
939 if ((proposed_placement & mem->placement & TTM_PL_MASK_CACHING) == 0) 995 for (i = 0; i < placement->num_placement; i++) {
940 return 0; 996 if ((placement->placement[i] & mem->placement &
941 997 TTM_PL_MASK_CACHING) &&
942 return 1; 998 (placement->placement[i] & mem->placement &
999 TTM_PL_MASK_MEM))
1000 return i;
1001 }
1002 return -1;
943} 1003}
944 1004
945int ttm_buffer_object_validate(struct ttm_buffer_object *bo, 1005int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
946 uint32_t proposed_placement, 1006 struct ttm_placement *placement,
947 bool interruptible, bool no_wait) 1007 bool interruptible, bool no_wait)
948{ 1008{
949 int ret; 1009 int ret;
950 1010
951 BUG_ON(!atomic_read(&bo->reserved)); 1011 BUG_ON(!atomic_read(&bo->reserved));
952 bo->proposed_placement = proposed_placement; 1012 /* Check that range is valid */
953 1013 if (placement->lpfn || placement->fpfn)
954 TTM_DEBUG("Proposed placement 0x%08lx, Old flags 0x%08lx\n", 1014 if (placement->fpfn > placement->lpfn ||
955 (unsigned long)proposed_placement, 1015 (placement->lpfn - placement->fpfn) < bo->num_pages)
956 (unsigned long)bo->mem.placement); 1016 return -EINVAL;
957
958 /* 1017 /*
959 * Check whether we need to move buffer. 1018 * Check whether we need to move buffer.
960 */ 1019 */
961 1020 ret = ttm_bo_mem_compat(placement, &bo->mem);
962 if (!ttm_bo_mem_compat(bo->proposed_placement, &bo->mem)) { 1021 if (ret < 0) {
963 ret = ttm_bo_move_buffer(bo, bo->proposed_placement, 1022 ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait);
964 interruptible, no_wait); 1023 if (ret)
965 if (ret) {
966 if (ret != -ERESTART)
967 printk(KERN_ERR TTM_PFX
968 "Failed moving buffer. "
969 "Proposed placement 0x%08x\n",
970 bo->proposed_placement);
971 if (ret == -ENOMEM)
972 printk(KERN_ERR TTM_PFX
973 "Out of aperture space or "
974 "DRM memory quota.\n");
975 return ret; 1024 return ret;
976 } 1025 } else {
1026 /*
1027 * Use the access and other non-mapping-related flag bits from
1028 * the compatible memory placement flags to the active flags
1029 */
1030 ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
1031 ~TTM_PL_MASK_MEMTYPE);
977 } 1032 }
978
979 /* 1033 /*
980 * We might need to add a TTM. 1034 * We might need to add a TTM.
981 */ 1035 */
982
983 if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { 1036 if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
984 ret = ttm_bo_add_ttm(bo, true); 1037 ret = ttm_bo_add_ttm(bo, true);
985 if (ret) 1038 if (ret)
986 return ret; 1039 return ret;
987 } 1040 }
988 /*
989 * Validation has succeeded, move the access and other
990 * non-mapping-related flag bits from the proposed flags to
991 * the active flags
992 */
993
994 ttm_flag_masked(&bo->mem.placement, bo->proposed_placement,
995 ~TTM_PL_MASK_MEMTYPE);
996
997 return 0; 1041 return 0;
998} 1042}
999EXPORT_SYMBOL(ttm_buffer_object_validate); 1043EXPORT_SYMBOL(ttm_buffer_object_validate);
@@ -1041,8 +1085,10 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev,
1041 size_t acc_size, 1085 size_t acc_size,
1042 void (*destroy) (struct ttm_buffer_object *)) 1086 void (*destroy) (struct ttm_buffer_object *))
1043{ 1087{
1044 int ret = 0; 1088 int i, c, ret = 0;
1045 unsigned long num_pages; 1089 unsigned long num_pages;
1090 uint32_t placements[8];
1091 struct ttm_placement placement;
1046 1092
1047 size += buffer_start & ~PAGE_MASK; 1093 size += buffer_start & ~PAGE_MASK;
1048 num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; 1094 num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -1099,7 +1145,16 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev,
1099 goto out_err; 1145 goto out_err;
1100 } 1146 }
1101 1147
1102 ret = ttm_buffer_object_validate(bo, flags, interruptible, false); 1148 placement.fpfn = 0;
1149 placement.lpfn = 0;
1150 for (i = 0, c = 0; i <= TTM_PL_PRIV5; i++)
1151 if (flags & (1 << i))
1152 placements[c++] = (flags & ~TTM_PL_MASK_MEM) | (1 << i);
1153 placement.placement = placements;
1154 placement.num_placement = c;
1155 placement.busy_placement = placements;
1156 placement.num_busy_placement = c;
1157 ret = ttm_buffer_object_validate(bo, &placement, interruptible, false);
1103 if (ret) 1158 if (ret)
1104 goto out_err; 1159 goto out_err;
1105 1160
@@ -1134,8 +1189,8 @@ int ttm_buffer_object_create(struct ttm_bo_device *bdev,
1134 struct ttm_buffer_object **p_bo) 1189 struct ttm_buffer_object **p_bo)
1135{ 1190{
1136 struct ttm_buffer_object *bo; 1191 struct ttm_buffer_object *bo;
1137 int ret;
1138 struct ttm_mem_global *mem_glob = bdev->glob->mem_glob; 1192 struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
1193 int ret;
1139 1194
1140 size_t acc_size = 1195 size_t acc_size =
1141 ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT); 1196 ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
@@ -1160,66 +1215,32 @@ int ttm_buffer_object_create(struct ttm_bo_device *bdev,
1160 return ret; 1215 return ret;
1161} 1216}
1162 1217
1163static int ttm_bo_leave_list(struct ttm_buffer_object *bo,
1164 uint32_t mem_type, bool allow_errors)
1165{
1166 int ret;
1167
1168 spin_lock(&bo->lock);
1169 ret = ttm_bo_wait(bo, false, false, false);
1170 spin_unlock(&bo->lock);
1171
1172 if (ret && allow_errors)
1173 goto out;
1174
1175 if (bo->mem.mem_type == mem_type)
1176 ret = ttm_bo_evict(bo, mem_type, false, false);
1177
1178 if (ret) {
1179 if (allow_errors) {
1180 goto out;
1181 } else {
1182 ret = 0;
1183 printk(KERN_ERR TTM_PFX "Cleanup eviction failed\n");
1184 }
1185 }
1186
1187out:
1188 return ret;
1189}
1190
1191static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev, 1218static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
1192 struct list_head *head, 1219 unsigned mem_type, bool allow_errors)
1193 unsigned mem_type, bool allow_errors)
1194{ 1220{
1221 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
1195 struct ttm_bo_global *glob = bdev->glob; 1222 struct ttm_bo_global *glob = bdev->glob;
1196 struct ttm_buffer_object *entry;
1197 int ret; 1223 int ret;
1198 int put_count;
1199 1224
1200 /* 1225 /*
1201 * Can't use standard list traversal since we're unlocking. 1226 * Can't use standard list traversal since we're unlocking.
1202 */ 1227 */
1203 1228
1204 spin_lock(&glob->lru_lock); 1229 spin_lock(&glob->lru_lock);
1205 1230 while (!list_empty(&man->lru)) {
1206 while (!list_empty(head)) {
1207 entry = list_first_entry(head, struct ttm_buffer_object, lru);
1208 kref_get(&entry->list_kref);
1209 ret = ttm_bo_reserve_locked(entry, false, false, false, 0);
1210 put_count = ttm_bo_del_from_lru(entry);
1211 spin_unlock(&glob->lru_lock); 1231 spin_unlock(&glob->lru_lock);
1212 while (put_count--) 1232 ret = ttm_mem_evict_first(bdev, mem_type, false, false);
1213 kref_put(&entry->list_kref, ttm_bo_ref_bug); 1233 if (ret) {
1214 BUG_ON(ret); 1234 if (allow_errors) {
1215 ret = ttm_bo_leave_list(entry, mem_type, allow_errors); 1235 return ret;
1216 ttm_bo_unreserve(entry); 1236 } else {
1217 kref_put(&entry->list_kref, ttm_bo_release_list); 1237 printk(KERN_ERR TTM_PFX
1238 "Cleanup eviction failed\n");
1239 }
1240 }
1218 spin_lock(&glob->lru_lock); 1241 spin_lock(&glob->lru_lock);
1219 } 1242 }
1220
1221 spin_unlock(&glob->lru_lock); 1243 spin_unlock(&glob->lru_lock);
1222
1223 return 0; 1244 return 0;
1224} 1245}
1225 1246
@@ -1246,7 +1267,7 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1246 1267
1247 ret = 0; 1268 ret = 0;
1248 if (mem_type > 0) { 1269 if (mem_type > 0) {
1249 ttm_bo_force_list_clean(bdev, &man->lru, mem_type, false); 1270 ttm_bo_force_list_clean(bdev, mem_type, false);
1250 1271
1251 spin_lock(&glob->lru_lock); 1272 spin_lock(&glob->lru_lock);
1252 if (drm_mm_clean(&man->manager)) 1273 if (drm_mm_clean(&man->manager))
@@ -1279,12 +1300,12 @@ int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1279 return 0; 1300 return 0;
1280 } 1301 }
1281 1302
1282 return ttm_bo_force_list_clean(bdev, &man->lru, mem_type, true); 1303 return ttm_bo_force_list_clean(bdev, mem_type, true);
1283} 1304}
1284EXPORT_SYMBOL(ttm_bo_evict_mm); 1305EXPORT_SYMBOL(ttm_bo_evict_mm);
1285 1306
1286int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, 1307int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
1287 unsigned long p_offset, unsigned long p_size) 1308 unsigned long p_size)
1288{ 1309{
1289 int ret = -EINVAL; 1310 int ret = -EINVAL;
1290 struct ttm_mem_type_manager *man; 1311 struct ttm_mem_type_manager *man;
@@ -1314,7 +1335,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
1314 type); 1335 type);
1315 return ret; 1336 return ret;
1316 } 1337 }
1317 ret = drm_mm_init(&man->manager, p_offset, p_size); 1338 ret = drm_mm_init(&man->manager, 0, p_size);
1318 if (ret) 1339 if (ret)
1319 return ret; 1340 return ret;
1320 } 1341 }
@@ -1463,7 +1484,7 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
1463 * Initialize the system memory buffer type. 1484 * Initialize the system memory buffer type.
1464 * Other types need to be driver / IOCTL initialized. 1485 * Other types need to be driver / IOCTL initialized.
1465 */ 1486 */
1466 ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0, 0); 1487 ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
1467 if (unlikely(ret != 0)) 1488 if (unlikely(ret != 0))
1468 goto out_no_sys; 1489 goto out_no_sys;
1469 1490
@@ -1693,7 +1714,7 @@ int ttm_bo_block_reservation(struct ttm_buffer_object *bo, bool interruptible,
1693 ret = wait_event_interruptible 1714 ret = wait_event_interruptible
1694 (bo->event_queue, atomic_read(&bo->reserved) == 0); 1715 (bo->event_queue, atomic_read(&bo->reserved) == 0);
1695 if (unlikely(ret != 0)) 1716 if (unlikely(ret != 0))
1696 return -ERESTART; 1717 return ret;
1697 } else { 1718 } else {
1698 wait_event(bo->event_queue, 1719 wait_event(bo->event_queue,
1699 atomic_read(&bo->reserved) == 0); 1720 atomic_read(&bo->reserved) == 0);
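[Editor's note] The core of the validation rework above is that ttm_bo_mem_space(), ttm_bo_move_buffer() and ttm_buffer_object_validate() now take a struct ttm_placement — a pfn range plus ordered arrays of placement flags — instead of a single proposed_placement word. A caller-side sketch of building one from a legacy flags mask, mirroring the conversion ttm_buffer_object_init() performs near the end of the diff above:

static void build_placement(uint32_t flags, uint32_t placements[8],
			    struct ttm_placement *placement)
{
	int i, c;

	/* one entry per memory-type bit set in the legacy flags word */
	for (i = 0, c = 0; i <= TTM_PL_PRIV5; i++)
		if (flags & (1 << i))
			placements[c++] = (flags & ~TTM_PL_MASK_MEM) | (1 << i);

	placement->fpfn = 0;		/* 0/0 means no pfn range restriction */
	placement->lpfn = 0;
	placement->placement = placements;
	placement->num_placement = c;
	placement->busy_placement = placements;	/* reuse the list under pressure */
	placement->num_busy_placement = c;
}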
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 61c5572d2b91..2ecf7d0c64f6 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -369,6 +369,7 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
369#endif 369#endif
370 return tmp; 370 return tmp;
371} 371}
372EXPORT_SYMBOL(ttm_io_prot);
372 373
373static int ttm_bo_ioremap(struct ttm_buffer_object *bo, 374static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
374 unsigned long bus_base, 375 unsigned long bus_base,
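[Editor's note] ttm_io_prot() is exported above so drivers can compute page protection for mappings they set up themselves. Judging from how TTM's own fault path uses the helper, a driver call would presumably look like this (the surrounding fault handler and variable names are assumed):

	/* derive the pgprot for a user mapping from the bo's caching flags */
	vma->vm_page_prot = ttm_io_prot(bo->mem.placement, vma->vm_page_prot);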
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 1c040d040338..609a85a4d855 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -114,7 +114,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
114 ret = ttm_bo_wait(bo, false, true, false); 114 ret = ttm_bo_wait(bo, false, true, false);
115 spin_unlock(&bo->lock); 115 spin_unlock(&bo->lock);
116 if (unlikely(ret != 0)) { 116 if (unlikely(ret != 0)) {
117 retval = (ret != -ERESTART) ? 117 retval = (ret != -ERESTARTSYS) ?
118 VM_FAULT_SIGBUS : VM_FAULT_NOPAGE; 118 VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
119 goto out_unlock; 119 goto out_unlock;
120 } 120 }
@@ -349,9 +349,6 @@ ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
349 switch (ret) { 349 switch (ret) {
350 case 0: 350 case 0:
351 break; 351 break;
352 case -ERESTART:
353 ret = -EINTR;
354 goto out_unref;
355 case -EBUSY: 352 case -EBUSY:
356 ret = -EAGAIN; 353 ret = -EAGAIN;
357 goto out_unref; 354 goto out_unref;
@@ -421,8 +418,6 @@ ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
421 switch (ret) { 418 switch (ret) {
422 case 0: 419 case 0:
423 break; 420 break;
424 case -ERESTART:
425 return -EINTR;
426 case -EBUSY: 421 case -EBUSY:
427 return -EAGAIN; 422 return -EAGAIN;
428 default: 423 default:
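[Editor's note] The ttm_bo_vm.c hunks drop the -ERESTART special cases because, after this series, the wait paths return -ERESTARTSYS directly. -ERESTARTSYS is the kernel's signal-restart code: passed up the syscall path unmodified, the signal-delivery core either restarts the call transparently or converts it to -EINTR for userspace, so no manual translation is needed. A sketch of the resulting pattern, using the wait from ttm_bo_wait_unreserved() above (the wrapper itself is hypothetical):

static int wait_for_unreserved(struct ttm_buffer_object *bo, bool interruptible)
{
	if (!interruptible) {
		wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
		return 0;
	}
	/* -ERESTARTSYS from an interrupted sleep is passed straight up */
	return wait_event_interruptible(bo->event_queue,
					atomic_read(&bo->reserved) == 0);
}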
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
new file mode 100644
index 000000000000..c285c2902d15
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -0,0 +1,117 @@
1/**************************************************************************
2 *
3 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#include "ttm/ttm_execbuf_util.h"
29#include "ttm/ttm_bo_driver.h"
30#include "ttm/ttm_placement.h"
31#include <linux/wait.h>
32#include <linux/sched.h>
33#include <linux/module.h>
34
35void ttm_eu_backoff_reservation(struct list_head *list)
36{
37 struct ttm_validate_buffer *entry;
38
39 list_for_each_entry(entry, list, head) {
40 struct ttm_buffer_object *bo = entry->bo;
41 if (!entry->reserved)
42 continue;
43
44 entry->reserved = false;
45 ttm_bo_unreserve(bo);
46 }
47}
48EXPORT_SYMBOL(ttm_eu_backoff_reservation);
49
50/*
51 * Reserve buffers for validation.
52 *
53 * If a buffer in the list is marked for CPU access, we back off and
54 * wait for that buffer to become free for GPU access.
55 *
56 * If a buffer is reserved for another validation, the validator with
57 * the highest validation sequence backs off and waits for that buffer
58 * to become unreserved. This prevents deadlocks when validating multiple
59 * buffers in different orders.
60 */
61
62int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq)
63{
64 struct ttm_validate_buffer *entry;
65 int ret;
66
67retry:
68 list_for_each_entry(entry, list, head) {
69 struct ttm_buffer_object *bo = entry->bo;
70
71 entry->reserved = false;
72 ret = ttm_bo_reserve(bo, true, false, true, val_seq);
73 if (ret != 0) {
74 ttm_eu_backoff_reservation(list);
75 if (ret == -EAGAIN) {
76 ret = ttm_bo_wait_unreserved(bo, true);
77 if (unlikely(ret != 0))
78 return ret;
79 goto retry;
80 } else
81 return ret;
82 }
83
84 entry->reserved = true;
85 if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
86 ttm_eu_backoff_reservation(list);
87 ret = ttm_bo_wait_cpu(bo, false);
88 if (ret)
89 return ret;
90 goto retry;
91 }
92 }
93 return 0;
94}
95EXPORT_SYMBOL(ttm_eu_reserve_buffers);
96
97void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
98{
99 struct ttm_validate_buffer *entry;
100
101 list_for_each_entry(entry, list, head) {
102 struct ttm_buffer_object *bo = entry->bo;
103 struct ttm_bo_driver *driver = bo->bdev->driver;
104 void *old_sync_obj;
105
106 spin_lock(&bo->lock);
107 old_sync_obj = bo->sync_obj;
108 bo->sync_obj = driver->sync_obj_ref(sync_obj);
109 bo->sync_obj_arg = entry->new_sync_obj_arg;
110 spin_unlock(&bo->lock);
111 ttm_bo_unreserve(bo);
112 entry->reserved = false;
113 if (old_sync_obj)
114 driver->sync_obj_unref(&old_sync_obj);
115 }
116}
117EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
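[Editor's note] The new ttm_execbuf_util.c gives drivers a reserve-all/fence-all pair for command submission. A hedged sketch of a driver execbuffer path driving it — only the ttm_eu_* calls and their semantics come from the file above; the function name, list setup and fence object are assumptions:

static int submit_execbuf(struct list_head *val_list, uint32_t val_seq,
			  void *fence_sync_obj)
{
	int ret;

	/* reserve every bo, backing off on contention to avoid deadlock */
	ret = ttm_eu_reserve_buffers(val_list, val_seq);
	if (ret)
		return ret;

	/* ... validate the buffers and emit the command stream here ... */

	/* attach the fence and drop all reservations in one pass */
	ttm_eu_fence_buffer_objects(val_list, fence_sync_obj);
	return 0;
}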
diff --git a/drivers/gpu/drm/ttm/ttm_lock.c b/drivers/gpu/drm/ttm/ttm_lock.c
new file mode 100644
index 000000000000..f619ebcaa4ec
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_lock.c
@@ -0,0 +1,311 @@
1/**************************************************************************
2 *
3 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27/*
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 */
30
31#include "ttm/ttm_lock.h"
32#include "ttm/ttm_module.h"
33#include <asm/atomic.h>
34#include <linux/errno.h>
35#include <linux/wait.h>
36#include <linux/sched.h>
37#include <linux/module.h>
38
39#define TTM_WRITE_LOCK_PENDING (1 << 0)
40#define TTM_VT_LOCK_PENDING (1 << 1)
41#define TTM_SUSPEND_LOCK_PENDING (1 << 2)
42#define TTM_VT_LOCK (1 << 3)
43#define TTM_SUSPEND_LOCK (1 << 4)
44
45void ttm_lock_init(struct ttm_lock *lock)
46{
47 spin_lock_init(&lock->lock);
48 init_waitqueue_head(&lock->queue);
49 lock->rw = 0;
50 lock->flags = 0;
51 lock->kill_takers = false;
52 lock->signal = SIGKILL;
53}
54EXPORT_SYMBOL(ttm_lock_init);
55
56void ttm_read_unlock(struct ttm_lock *lock)
57{
58 spin_lock(&lock->lock);
59 if (--lock->rw == 0)
60 wake_up_all(&lock->queue);
61 spin_unlock(&lock->lock);
62}
63EXPORT_SYMBOL(ttm_read_unlock);
64
65static bool __ttm_read_lock(struct ttm_lock *lock)
66{
67 bool locked = false;
68
69 spin_lock(&lock->lock);
70 if (unlikely(lock->kill_takers)) {
71 send_sig(lock->signal, current, 0);
72 spin_unlock(&lock->lock);
73 return false;
74 }
75 if (lock->rw >= 0 && lock->flags == 0) {
76 ++lock->rw;
77 locked = true;
78 }
79 spin_unlock(&lock->lock);
80 return locked;
81}
82
83int ttm_read_lock(struct ttm_lock *lock, bool interruptible)
84{
85 int ret = 0;
86
87 if (interruptible)
88 ret = wait_event_interruptible(lock->queue,
89 __ttm_read_lock(lock));
90 else
91 wait_event(lock->queue, __ttm_read_lock(lock));
92 return ret;
93}
94EXPORT_SYMBOL(ttm_read_lock);
95
96static bool __ttm_read_trylock(struct ttm_lock *lock, bool *locked)
97{
98 bool block = true;
99
100 *locked = false;
101
102 spin_lock(&lock->lock);
103 if (unlikely(lock->kill_takers)) {
104 send_sig(lock->signal, current, 0);
105 spin_unlock(&lock->lock);
106 return false;
107 }
108 if (lock->rw >= 0 && lock->flags == 0) {
109 ++lock->rw;
110 block = false;
111 *locked = true;
112 } else if (lock->flags == 0) {
113 block = false;
114 }
115 spin_unlock(&lock->lock);
116
117 return !block;
118}
119
120int ttm_read_trylock(struct ttm_lock *lock, bool interruptible)
121{
122 int ret = 0;
123 bool locked;
124
125 if (interruptible)
126 ret = wait_event_interruptible
127 (lock->queue, __ttm_read_trylock(lock, &locked));
128 else
129 wait_event(lock->queue, __ttm_read_trylock(lock, &locked));
130
131 if (unlikely(ret != 0)) {
132 BUG_ON(locked);
133 return ret;
134 }
135
136 return (locked) ? 0 : -EBUSY;
137}

void ttm_write_unlock(struct ttm_lock *lock)
{
	spin_lock(&lock->lock);
	lock->rw = 0;
	wake_up_all(&lock->queue);
	spin_unlock(&lock->lock);
}
EXPORT_SYMBOL(ttm_write_unlock);

static bool __ttm_write_lock(struct ttm_lock *lock)
{
	bool locked = false;

	spin_lock(&lock->lock);
	if (unlikely(lock->kill_takers)) {
		send_sig(lock->signal, current, 0);
		spin_unlock(&lock->lock);
		return false;
	}
	if (lock->rw == 0 && ((lock->flags & ~TTM_WRITE_LOCK_PENDING) == 0)) {
		lock->rw = -1;
		lock->flags &= ~TTM_WRITE_LOCK_PENDING;
		locked = true;
	} else {
		lock->flags |= TTM_WRITE_LOCK_PENDING;
	}
	spin_unlock(&lock->lock);
	return locked;
}

int ttm_write_lock(struct ttm_lock *lock, bool interruptible)
{
	int ret = 0;

	if (interruptible) {
		ret = wait_event_interruptible(lock->queue,
					       __ttm_write_lock(lock));
		if (unlikely(ret != 0)) {
			spin_lock(&lock->lock);
			lock->flags &= ~TTM_WRITE_LOCK_PENDING;
			wake_up_all(&lock->queue);
			spin_unlock(&lock->lock);
		}
	} else {
		wait_event(lock->queue, __ttm_write_lock(lock));
	}

	return ret;
}
EXPORT_SYMBOL(ttm_write_lock);
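
/*
 * Sketch: the write lock is the exclusive mode, taken around operations
 * that must see no concurrent readers; "dev_priv" is illustrative.
 *
 *	ret = ttm_write_lock(&dev_priv->ttm_lock, true);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	...	// exclusive section
 *	ttm_write_unlock(&dev_priv->ttm_lock);
 */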

void ttm_write_lock_downgrade(struct ttm_lock *lock)
{
	spin_lock(&lock->lock);
	lock->rw = 1;
	wake_up_all(&lock->queue);
	spin_unlock(&lock->lock);
}
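
/*
 * Downgrading converts an exclusive hold (rw == -1) into a single read
 * hold (rw == 1), so the caller must eventually release it with
 * ttm_read_unlock(), not ttm_write_unlock(). Sketch:
 *
 *	ttm_write_lock_downgrade(&dev_priv->ttm_lock);
 *	...	// now runs concurrently with other readers
 *	ttm_read_unlock(&dev_priv->ttm_lock);
 */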

static int __ttm_vt_unlock(struct ttm_lock *lock)
{
	int ret = 0;

	spin_lock(&lock->lock);
	if (unlikely(!(lock->flags & TTM_VT_LOCK)))
		ret = -EINVAL;
	lock->flags &= ~TTM_VT_LOCK;
	wake_up_all(&lock->queue);
	spin_unlock(&lock->lock);
	printk(KERN_INFO TTM_PFX "vt unlock.\n");

	return ret;
}

static void ttm_vt_lock_remove(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct ttm_lock *lock = container_of(base, struct ttm_lock, base);
	int ret;

	*p_base = NULL;
	ret = __ttm_vt_unlock(lock);
	BUG_ON(ret != 0);
}

static bool __ttm_vt_lock(struct ttm_lock *lock)
{
	bool locked = false;

	spin_lock(&lock->lock);
	if (lock->rw == 0) {
		lock->flags &= ~TTM_VT_LOCK_PENDING;
		lock->flags |= TTM_VT_LOCK;
		locked = true;
	} else {
		lock->flags |= TTM_VT_LOCK_PENDING;
	}
	spin_unlock(&lock->lock);
	return locked;
}

int ttm_vt_lock(struct ttm_lock *lock,
		bool interruptible,
		struct ttm_object_file *tfile)
{
	int ret = 0;

	if (interruptible) {
		ret = wait_event_interruptible(lock->queue,
					       __ttm_vt_lock(lock));
		if (unlikely(ret != 0)) {
			spin_lock(&lock->lock);
			lock->flags &= ~TTM_VT_LOCK_PENDING;
			wake_up_all(&lock->queue);
			spin_unlock(&lock->lock);
			return ret;
		}
	} else {
		wait_event(lock->queue, __ttm_vt_lock(lock));
	}

	/*
	 * Add a base-object, the destructor of which will
	 * make sure the lock is released if the client dies
	 * while holding it.
	 */

	ret = ttm_base_object_init(tfile, &lock->base, false,
				   ttm_lock_type, &ttm_vt_lock_remove, NULL);
	if (ret) {
		(void)__ttm_vt_unlock(lock);
	} else {
		lock->vt_holder = tfile;
		printk(KERN_INFO TTM_PFX "vt lock.\n");
	}

	return ret;
}
EXPORT_SYMBOL(ttm_vt_lock);
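
/*
 * Sketch: a master ioctl handler would take the VT lock on behalf of a
 * client before switching away from the console, passing the client's
 * ttm_object_file so the lock is dropped should the client die:
 *
 *	ret = ttm_vt_lock(&dev_priv->ttm_lock, true, tfile);
 *	...
 *	ret = ttm_vt_unlock(&dev_priv->ttm_lock);
 */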

int ttm_vt_unlock(struct ttm_lock *lock)
{
	return ttm_ref_object_base_unref(lock->vt_holder,
					 lock->base.hash.key, TTM_REF_USAGE);
}
EXPORT_SYMBOL(ttm_vt_unlock);

void ttm_suspend_unlock(struct ttm_lock *lock)
{
	spin_lock(&lock->lock);
	lock->flags &= ~TTM_SUSPEND_LOCK;
	wake_up_all(&lock->queue);
	spin_unlock(&lock->lock);
}

static bool __ttm_suspend_lock(struct ttm_lock *lock)
{
	bool locked = false;

	spin_lock(&lock->lock);
	if (lock->rw == 0) {
		lock->flags &= ~TTM_SUSPEND_LOCK_PENDING;
		lock->flags |= TTM_SUSPEND_LOCK;
		locked = true;
	} else {
		lock->flags |= TTM_SUSPEND_LOCK_PENDING;
	}
	spin_unlock(&lock->lock);
	return locked;
}

void ttm_suspend_lock(struct ttm_lock *lock)
{
	wait_event(lock->queue, __ttm_suspend_lock(lock));
}
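
/*
 * Sketch: the suspend lock is the uninterruptible variant meant for a
 * driver's PM callbacks, fencing off all readers while the device is
 * down:
 *
 *	ttm_suspend_lock(&dev_priv->ttm_lock);		// in .suspend()
 *	...
 *	ttm_suspend_unlock(&dev_priv->ttm_lock);	// in .resume()
 */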
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index 072c281a6bb5..f5245c02b8fd 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -274,16 +274,17 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
 static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob,
 				     const struct sysinfo *si)
 {
-	struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
+	struct ttm_mem_zone *zone;
 	uint64_t mem;
 	int ret;
 
-	if (unlikely(!zone))
-		return -ENOMEM;
-
 	if (si->totalhigh == 0)
 		return 0;
 
+	zone = kzalloc(sizeof(*zone), GFP_KERNEL);
+	if (unlikely(!zone))
+		return -ENOMEM;
+
 	mem = si->totalram;
 	mem *= si->mem_unit;
 
@@ -322,8 +323,10 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
 	 * No special dma32 zone needed.
 	 */
 
-	if (mem <= ((uint64_t) 1ULL << 32))
+	if (mem <= ((uint64_t) 1ULL << 32)) {
+		kfree(zone);
 		return 0;
+	}
 
 	/*
 	 * Limit max dma32 memory to 4GB for now
@@ -460,6 +463,7 @@ void ttm_mem_global_free(struct ttm_mem_global *glob,
 {
 	return ttm_mem_global_free_zone(glob, NULL, amount);
 }
+EXPORT_SYMBOL(ttm_mem_global_free);
 
 static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
 				  struct ttm_mem_zone *single_zone,
@@ -533,6 +537,7 @@ int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
 	return ttm_mem_global_alloc_zone(glob, NULL, memory, no_wait,
 					 interruptible);
 }
+EXPORT_SYMBOL(ttm_mem_global_alloc);
 
 int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
 			      struct page *page,
@@ -588,3 +593,4 @@ size_t ttm_round_pot(size_t size)
 	}
 	return 0;
 }
+EXPORT_SYMBOL(ttm_round_pot);
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
new file mode 100644
index 000000000000..1099abac824b
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -0,0 +1,452 @@
/**************************************************************************
 *
 * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
/** @file ttm_object.c
 *
 * Base- and reference-object implementation for the various
 * ttm objects. Implements reference counting, minimal security checks
 * and release on file close.
 */

/**
 * struct ttm_object_file
 *
 * @tdev: Pointer to the ttm_object_device.
 *
 * @lock: Lock that protects the ref_list list and the
 * ref_hash hash tables.
 *
 * @ref_list: List of ttm_ref_objects to be destroyed at
 * file release.
 *
 * @ref_hash: Hash tables of ref objects, one per ttm_ref_type,
 * for fast lookup of ref objects given a base object.
 *
 * @refcount: Reference count. Base objects created through this file
 * hold a reference, so the structure may outlive file release.
 */

#include "ttm/ttm_object.h"
#include "ttm/ttm_module.h"
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <asm/atomic.h>

struct ttm_object_file {
	struct ttm_object_device *tdev;
	rwlock_t lock;
	struct list_head ref_list;
	struct drm_open_hash ref_hash[TTM_REF_NUM];
	struct kref refcount;
};

/**
 * struct ttm_object_device
 *
 * @object_lock: lock that protects the object_hash hash table.
 *
 * @object_hash: hash table for fast lookup of object global names.
 *
 * @object_count: Per device object count.
 *
 * @mem_glob: Pointer to the ttm_mem_global used to account the memory
 * occupied by ref objects.
 *
 * This is the per-device data structure needed for ttm object management.
 */

struct ttm_object_device {
	rwlock_t object_lock;
	struct drm_open_hash object_hash;
	atomic_t object_count;
	struct ttm_mem_global *mem_glob;
};

/**
 * struct ttm_ref_object
 *
 * @hash: Hash entry for the per-file object reference hash.
 *
 * @head: List entry for the per-file list of ref-objects.
 *
 * @kref: Ref count.
 *
 * @obj: Base object this ref object is referencing.
 *
 * @ref_type: Type of ref object.
 *
 * @tfile: Pointer to the ttm_object_file this ref object belongs to.
 *
 * This is similar to an idr object, but it also has a hash table entry
 * that allows lookup with a pointer to the referenced object as a key. In
 * that way, one can easily detect whether a base object is referenced by
 * a particular ttm_object_file. It also carries a ref count to avoid creating
 * multiple ref objects if a ttm_object_file references the same base
 * object more than once.
 */

struct ttm_ref_object {
	struct drm_hash_item hash;
	struct list_head head;
	struct kref kref;
	struct ttm_base_object *obj;
	enum ttm_ref_type ref_type;
	struct ttm_object_file *tfile;
};

static inline struct ttm_object_file *
ttm_object_file_ref(struct ttm_object_file *tfile)
{
	kref_get(&tfile->refcount);
	return tfile;
}

static void ttm_object_file_destroy(struct kref *kref)
{
	struct ttm_object_file *tfile =
		container_of(kref, struct ttm_object_file, refcount);

	kfree(tfile);
}

static inline void ttm_object_file_unref(struct ttm_object_file **p_tfile)
{
	struct ttm_object_file *tfile = *p_tfile;

	*p_tfile = NULL;
	kref_put(&tfile->refcount, ttm_object_file_destroy);
}

int ttm_base_object_init(struct ttm_object_file *tfile,
			 struct ttm_base_object *base,
			 bool shareable,
			 enum ttm_object_type object_type,
			 void (*refcount_release) (struct ttm_base_object **),
			 void (*ref_obj_release) (struct ttm_base_object *,
						  enum ttm_ref_type ref_type))
{
	struct ttm_object_device *tdev = tfile->tdev;
	int ret;

	base->shareable = shareable;
	base->tfile = ttm_object_file_ref(tfile);
	base->refcount_release = refcount_release;
	base->ref_obj_release = ref_obj_release;
	base->object_type = object_type;
	write_lock(&tdev->object_lock);
	kref_init(&base->refcount);
	ret = drm_ht_just_insert_please(&tdev->object_hash,
					&base->hash,
					(unsigned long)base, 31, 0, 0);
	write_unlock(&tdev->object_lock);
	if (unlikely(ret != 0))
		goto out_err0;

	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
	if (unlikely(ret != 0))
		goto out_err1;

	/*
	 * The mandatory TTM_REF_USAGE reference added above now keeps
	 * @base alive, so drop the initial kref taken at insertion.
	 */
	ttm_base_object_unref(&base);

	return 0;
out_err1:
	(void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
out_err0:
	return ret;
}
EXPORT_SYMBOL(ttm_base_object_init);
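
/*
 * Sketch: a driver object would embed a struct ttm_base_object and
 * register it here; "my_buffer", "my_buffer_type" and
 * "my_buffer_release" are illustrative names only.
 *
 *	struct my_buffer {
 *		struct ttm_base_object base;
 *		...
 *	};
 *
 *	ret = ttm_base_object_init(tfile, &buf->base, true,
 *				   my_buffer_type, &my_buffer_release, NULL);
 */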

static void ttm_release_base(struct kref *kref)
{
	struct ttm_base_object *base =
		container_of(kref, struct ttm_base_object, refcount);
	struct ttm_object_device *tdev = base->tfile->tdev;

	(void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
	/*
	 * The object lock is held on entry (taken in
	 * ttm_base_object_unref()); release it across the driver's
	 * release callback and reacquire it before returning to
	 * kref_put().
	 */
	write_unlock(&tdev->object_lock);
	if (base->refcount_release) {
		ttm_object_file_unref(&base->tfile);
		base->refcount_release(&base);
	}
	write_lock(&tdev->object_lock);
}

void ttm_base_object_unref(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct ttm_object_device *tdev = base->tfile->tdev;

	*p_base = NULL;

	/*
	 * Need to take the lock here to avoid racing with
	 * users trying to look up the object.
	 */

	write_lock(&tdev->object_lock);
	(void)kref_put(&base->refcount, &ttm_release_base);
	write_unlock(&tdev->object_lock);
}
EXPORT_SYMBOL(ttm_base_object_unref);

struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
					       uint32_t key)
{
	struct ttm_object_device *tdev = tfile->tdev;
	struct ttm_base_object *base;
	struct drm_hash_item *hash;
	int ret;

	read_lock(&tdev->object_lock);
	ret = drm_ht_find_item(&tdev->object_hash, key, &hash);

	if (likely(ret == 0)) {
		base = drm_hash_entry(hash, struct ttm_base_object, hash);
		kref_get(&base->refcount);
	}
	read_unlock(&tdev->object_lock);

	if (unlikely(ret != 0))
		return NULL;

	if (tfile != base->tfile && !base->shareable) {
		printk(KERN_ERR TTM_PFX
		       "Attempted access of non-shareable object.\n");
		ttm_base_object_unref(&base);
		return NULL;
	}

	return base;
}
EXPORT_SYMBOL(ttm_base_object_lookup);
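
/*
 * Sketch: an ioctl handler would translate a user-space handle into a
 * referenced base object and drop the reference when done ("arg" is
 * illustrative):
 *
 *	base = ttm_base_object_lookup(tfile, arg->handle);
 *	if (unlikely(base == NULL))
 *		return -EINVAL;
 *	...
 *	ttm_base_object_unref(&base);
 */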

int ttm_ref_object_add(struct ttm_object_file *tfile,
		       struct ttm_base_object *base,
		       enum ttm_ref_type ref_type, bool *existed)
{
	struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
	struct ttm_ref_object *ref;
	struct drm_hash_item *hash;
	struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
	int ret = -EINVAL;

	if (existed != NULL)
		*existed = true;

	while (ret == -EINVAL) {
		read_lock(&tfile->lock);
		ret = drm_ht_find_item(ht, base->hash.key, &hash);

		if (ret == 0) {
			ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
			kref_get(&ref->kref);
			read_unlock(&tfile->lock);
			break;
		}

		read_unlock(&tfile->lock);
		ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
					   false, false);
		if (unlikely(ret != 0))
			return ret;
		ref = kmalloc(sizeof(*ref), GFP_KERNEL);
		if (unlikely(ref == NULL)) {
			ttm_mem_global_free(mem_glob, sizeof(*ref));
			return -ENOMEM;
		}

		ref->hash.key = base->hash.key;
		ref->obj = base;
		ref->tfile = tfile;
		ref->ref_type = ref_type;
		kref_init(&ref->kref);

		write_lock(&tfile->lock);
		ret = drm_ht_insert_item(ht, &ref->hash);

		if (likely(ret == 0)) {
			list_add_tail(&ref->head, &tfile->ref_list);
			kref_get(&base->refcount);
			write_unlock(&tfile->lock);
			if (existed != NULL)
				*existed = false;
			break;
		}

		/*
		 * Someone else inserted a ref object for this base object
		 * between our read-side lookup and the insert; free our
		 * copy and retry the lookup.
		 */
		write_unlock(&tfile->lock);
		BUG_ON(ret != -EINVAL);

		ttm_mem_global_free(mem_glob, sizeof(*ref));
		kfree(ref);
	}

	return ret;
}
EXPORT_SYMBOL(ttm_ref_object_add);
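
/*
 * Sketch: taking an additional usage reference on behalf of a client,
 * distinguishing first-time references from repeats via @existed:
 *
 *	bool existed;
 *
 *	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, &existed);
 *	if (ret == 0 && !existed)
 *		...	// first reference from this file
 */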

static void ttm_ref_object_release(struct kref *kref)
{
	struct ttm_ref_object *ref =
		container_of(kref, struct ttm_ref_object, kref);
	struct ttm_base_object *base = ref->obj;
	struct ttm_object_file *tfile = ref->tfile;
	struct drm_open_hash *ht;
	struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;

	ht = &tfile->ref_hash[ref->ref_type];
	(void)drm_ht_remove_item(ht, &ref->hash);
	list_del(&ref->head);
	write_unlock(&tfile->lock);

	if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
		base->ref_obj_release(base, ref->ref_type);

	ttm_base_object_unref(&ref->obj);
	ttm_mem_global_free(mem_glob, sizeof(*ref));
	kfree(ref);
	write_lock(&tfile->lock);
}

int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
			      unsigned long key, enum ttm_ref_type ref_type)
{
	struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
	struct ttm_ref_object *ref;
	struct drm_hash_item *hash;
	int ret;

	write_lock(&tfile->lock);
	ret = drm_ht_find_item(ht, key, &hash);
	if (unlikely(ret != 0)) {
		write_unlock(&tfile->lock);
		return -EINVAL;
	}
	ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
	kref_put(&ref->kref, ttm_ref_object_release);
	write_unlock(&tfile->lock);
	return 0;
}
EXPORT_SYMBOL(ttm_ref_object_base_unref);

void ttm_object_file_release(struct ttm_object_file **p_tfile)
{
	struct ttm_ref_object *ref;
	struct list_head *list;
	unsigned int i;
	struct ttm_object_file *tfile = *p_tfile;

	*p_tfile = NULL;
	write_lock(&tfile->lock);

	/*
	 * Since we release the lock within the loop, we have to
	 * restart it from the beginning each time.
	 */

	while (!list_empty(&tfile->ref_list)) {
		list = tfile->ref_list.next;
		ref = list_entry(list, struct ttm_ref_object, head);
		ttm_ref_object_release(&ref->kref);
	}

	for (i = 0; i < TTM_REF_NUM; ++i)
		drm_ht_remove(&tfile->ref_hash[i]);

	write_unlock(&tfile->lock);
	ttm_object_file_unref(&tfile);
}
EXPORT_SYMBOL(ttm_object_file_release);

struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
					     unsigned int hash_order)
{
	struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
	unsigned int i;
	unsigned int j = 0;
	int ret;

	if (unlikely(tfile == NULL))
		return NULL;

	rwlock_init(&tfile->lock);
	tfile->tdev = tdev;
	kref_init(&tfile->refcount);
	INIT_LIST_HEAD(&tfile->ref_list);

	for (i = 0; i < TTM_REF_NUM; ++i) {
		ret = drm_ht_create(&tfile->ref_hash[i], hash_order);
		if (ret) {
			j = i;
			goto out_err;
		}
	}

	return tfile;
out_err:
	for (i = 0; i < j; ++i)
		drm_ht_remove(&tfile->ref_hash[i]);

	kfree(tfile);

	return NULL;
}
EXPORT_SYMBOL(ttm_object_file_init);
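
/*
 * Sketch: a driver would typically create one object file per open file
 * descriptor and release it on close. "hash_order" sizes the per-type
 * ref hash tables; 10 below is only an example value.
 *
 *	priv->tfile = ttm_object_file_init(dev_priv->tdev, 10);
 *	...
 *	ttm_object_file_release(&priv->tfile);
 */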

struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global
						 *mem_glob,
						 unsigned int hash_order)
{
	struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
	int ret;

	if (unlikely(tdev == NULL))
		return NULL;

	tdev->mem_glob = mem_glob;
	rwlock_init(&tdev->object_lock);
	atomic_set(&tdev->object_count, 0);
	ret = drm_ht_create(&tdev->object_hash, hash_order);

	if (likely(ret == 0))
		return tdev;

	kfree(tdev);
	return NULL;
}
EXPORT_SYMBOL(ttm_object_device_init);
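
/*
 * Sketch: device-wide setup done once at driver load, paired with a
 * release at unload; 12 is only an example hash order.
 *
 *	dev_priv->tdev = ttm_object_device_init(mem_glob, 12);
 *	...
 *	ttm_object_device_release(&dev_priv->tdev);
 */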

void ttm_object_device_release(struct ttm_object_device **p_tdev)
{
	struct ttm_object_device *tdev = *p_tdev;

	*p_tdev = NULL;

	write_lock(&tdev->object_lock);
	drm_ht_remove(&tdev->object_hash);
	write_unlock(&tdev->object_lock);

	kfree(tdev);
}
EXPORT_SYMBOL(ttm_object_device_release);
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 7bcb89f39ce8..9c2b1cc5dba5 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -192,6 +192,7 @@ int ttm_tt_populate(struct ttm_tt *ttm)
 	ttm->state = tt_unbound;
 	return 0;
 }
+EXPORT_SYMBOL(ttm_tt_populate);
 
 #ifdef CONFIG_X86
 static inline int ttm_tt_set_page_caching(struct page *p,