diff options
Diffstat (limited to 'drivers/gpu')
81 files changed, 6782 insertions, 1762 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index f831ea159291..96eddd17e050 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig | |||
@@ -92,6 +92,7 @@ config DRM_I830 | |||
92 | config DRM_I915 | 92 | config DRM_I915 |
93 | tristate "i915 driver" | 93 | tristate "i915 driver" |
94 | depends on AGP_INTEL | 94 | depends on AGP_INTEL |
95 | select SHMEM | ||
95 | select DRM_KMS_HELPER | 96 | select DRM_KMS_HELPER |
96 | select FB_CFB_FILLRECT | 97 | select FB_CFB_FILLRECT |
97 | select FB_CFB_COPYAREA | 98 | select FB_CFB_COPYAREA |
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 3c8827a7aabd..91567ac806f1 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile | |||
@@ -15,7 +15,7 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \ | |||
15 | 15 | ||
16 | drm-$(CONFIG_COMPAT) += drm_ioc32.o | 16 | drm-$(CONFIG_COMPAT) += drm_ioc32.o |
17 | 17 | ||
18 | drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o | 18 | drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_i2c_helper.o |
19 | 19 | ||
20 | obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o | 20 | obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o |
21 | 21 | ||
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 3bc870d38a97..4a7bbdbedfc2 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c | |||
@@ -125,6 +125,15 @@ static struct drm_prop_enum_list drm_tv_subconnector_enum_list[] = | |||
125 | DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name, | 125 | DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name, |
126 | drm_tv_subconnector_enum_list) | 126 | drm_tv_subconnector_enum_list) |
127 | 127 | ||
128 | static struct drm_prop_enum_list drm_dirty_info_enum_list[] = { | ||
129 | { DRM_MODE_DIRTY_OFF, "Off" }, | ||
130 | { DRM_MODE_DIRTY_ON, "On" }, | ||
131 | { DRM_MODE_DIRTY_ANNOTATE, "Annotate" }, | ||
132 | }; | ||
133 | |||
134 | DRM_ENUM_NAME_FN(drm_get_dirty_info_name, | ||
135 | drm_dirty_info_enum_list) | ||
136 | |||
128 | struct drm_conn_prop_enum_list { | 137 | struct drm_conn_prop_enum_list { |
129 | int type; | 138 | int type; |
130 | char *name; | 139 | char *name; |
@@ -803,6 +812,36 @@ int drm_mode_create_dithering_property(struct drm_device *dev) | |||
803 | EXPORT_SYMBOL(drm_mode_create_dithering_property); | 812 | EXPORT_SYMBOL(drm_mode_create_dithering_property); |
804 | 813 | ||
805 | /** | 814 | /** |
815 | * drm_mode_create_dirty_property - create dirty property | ||
816 | * @dev: DRM device | ||
817 | * | ||
818 | * Called by a driver the first time it's needed, must be attached to desired | ||
819 | * connectors. | ||
820 | */ | ||
821 | int drm_mode_create_dirty_info_property(struct drm_device *dev) | ||
822 | { | ||
823 | struct drm_property *dirty_info; | ||
824 | int i; | ||
825 | |||
826 | if (dev->mode_config.dirty_info_property) | ||
827 | return 0; | ||
828 | |||
829 | dirty_info = | ||
830 | drm_property_create(dev, DRM_MODE_PROP_ENUM | | ||
831 | DRM_MODE_PROP_IMMUTABLE, | ||
832 | "dirty", | ||
833 | ARRAY_SIZE(drm_dirty_info_enum_list)); | ||
834 | for (i = 0; i < ARRAY_SIZE(drm_dirty_info_enum_list); i++) | ||
835 | drm_property_add_enum(dirty_info, i, | ||
836 | drm_dirty_info_enum_list[i].type, | ||
837 | drm_dirty_info_enum_list[i].name); | ||
838 | dev->mode_config.dirty_info_property = dirty_info; | ||
839 | |||
840 | return 0; | ||
841 | } | ||
842 | EXPORT_SYMBOL(drm_mode_create_dirty_info_property); | ||
843 | |||
844 | /** | ||
806 | * drm_mode_config_init - initialize DRM mode_configuration structure | 845 | * drm_mode_config_init - initialize DRM mode_configuration structure |
807 | * @dev: DRM device | 846 | * @dev: DRM device |
808 | * | 847 | * |
@@ -1754,6 +1793,71 @@ out: | |||
1754 | return ret; | 1793 | return ret; |
1755 | } | 1794 | } |
1756 | 1795 | ||
1796 | int drm_mode_dirtyfb_ioctl(struct drm_device *dev, | ||
1797 | void *data, struct drm_file *file_priv) | ||
1798 | { | ||
1799 | struct drm_clip_rect __user *clips_ptr; | ||
1800 | struct drm_clip_rect *clips = NULL; | ||
1801 | struct drm_mode_fb_dirty_cmd *r = data; | ||
1802 | struct drm_mode_object *obj; | ||
1803 | struct drm_framebuffer *fb; | ||
1804 | unsigned flags; | ||
1805 | int num_clips; | ||
1806 | int ret = 0; | ||
1807 | |||
1808 | mutex_lock(&dev->mode_config.mutex); | ||
1809 | obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB); | ||
1810 | if (!obj) { | ||
1811 | DRM_ERROR("invalid framebuffer id\n"); | ||
1812 | ret = -EINVAL; | ||
1813 | goto out_err1; | ||
1814 | } | ||
1815 | fb = obj_to_fb(obj); | ||
1816 | |||
1817 | num_clips = r->num_clips; | ||
1818 | clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr; | ||
1819 | |||
1820 | if (!num_clips != !clips_ptr) { | ||
1821 | ret = -EINVAL; | ||
1822 | goto out_err1; | ||
1823 | } | ||
1824 | |||
1825 | flags = DRM_MODE_FB_DIRTY_FLAGS & r->flags; | ||
1826 | |||
1827 | /* If userspace annotates copy, clips must come in pairs */ | ||
1828 | if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY && (num_clips % 2)) { | ||
1829 | ret = -EINVAL; | ||
1830 | goto out_err1; | ||
1831 | } | ||
1832 | |||
1833 | if (num_clips && clips_ptr) { | ||
1834 | clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL); | ||
1835 | if (!clips) { | ||
1836 | ret = -ENOMEM; | ||
1837 | goto out_err1; | ||
1838 | } | ||
1839 | |||
1840 | ret = copy_from_user(clips, clips_ptr, | ||
1841 | num_clips * sizeof(*clips)); | ||
1842 | if (ret) | ||
1843 | goto out_err2; | ||
1844 | } | ||
1845 | |||
1846 | if (fb->funcs->dirty) { | ||
1847 | ret = fb->funcs->dirty(fb, flags, r->color, clips, num_clips); | ||
1848 | } else { | ||
1849 | ret = -ENOSYS; | ||
1850 | goto out_err2; | ||
1851 | } | ||
1852 | |||
1853 | out_err2: | ||
1854 | kfree(clips); | ||
1855 | out_err1: | ||
1856 | mutex_unlock(&dev->mode_config.mutex); | ||
1857 | return ret; | ||
1858 | } | ||
1859 | |||
1860 | |||
1757 | /** | 1861 | /** |
1758 | * drm_fb_release - remove and free the FBs on this file | 1862 | * drm_fb_release - remove and free the FBs on this file |
1759 | * @filp: file * from the ioctl | 1863 | * @filp: file * from the ioctl |
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index 1fe4e1d344fd..3963b3c1081a 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c | |||
@@ -109,7 +109,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector, | |||
109 | 109 | ||
110 | count = (*connector_funcs->get_modes)(connector); | 110 | count = (*connector_funcs->get_modes)(connector); |
111 | if (!count) { | 111 | if (!count) { |
112 | count = drm_add_modes_noedid(connector, 800, 600); | 112 | count = drm_add_modes_noedid(connector, 1024, 768); |
113 | if (!count) | 113 | if (!count) |
114 | return 0; | 114 | return 0; |
115 | } | 115 | } |
@@ -331,6 +331,7 @@ create_mode: | |||
331 | cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60, | 331 | cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60, |
332 | cmdline_mode->rb, cmdline_mode->interlace, | 332 | cmdline_mode->rb, cmdline_mode->interlace, |
333 | cmdline_mode->margins); | 333 | cmdline_mode->margins); |
334 | drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); | ||
334 | list_add(&mode->head, &connector->modes); | 335 | list_add(&mode->head, &connector->modes); |
335 | return mode; | 336 | return mode; |
336 | } | 337 | } |
diff --git a/drivers/gpu/drm/i915/intel_dp_i2c.c b/drivers/gpu/drm/drm_dp_i2c_helper.c index a57273ade677..548887c8506f 100644 --- a/drivers/gpu/drm/i915/intel_dp_i2c.c +++ b/drivers/gpu/drm/drm_dp_i2c_helper.c | |||
@@ -28,84 +28,20 @@ | |||
28 | #include <linux/errno.h> | 28 | #include <linux/errno.h> |
29 | #include <linux/sched.h> | 29 | #include <linux/sched.h> |
30 | #include <linux/i2c.h> | 30 | #include <linux/i2c.h> |
31 | #include "intel_dp.h" | 31 | #include "drm_dp_helper.h" |
32 | #include "drmP.h" | 32 | #include "drmP.h" |
33 | 33 | ||
34 | /* Run a single AUX_CH I2C transaction, writing/reading data as necessary */ | 34 | /* Run a single AUX_CH I2C transaction, writing/reading data as necessary */ |
35 | |||
36 | #define MODE_I2C_START 1 | ||
37 | #define MODE_I2C_WRITE 2 | ||
38 | #define MODE_I2C_READ 4 | ||
39 | #define MODE_I2C_STOP 8 | ||
40 | |||
41 | static int | 35 | static int |
42 | i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode, | 36 | i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode, |
43 | uint8_t write_byte, uint8_t *read_byte) | 37 | uint8_t write_byte, uint8_t *read_byte) |
44 | { | 38 | { |
45 | struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; | 39 | struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; |
46 | uint16_t address = algo_data->address; | ||
47 | uint8_t msg[5]; | ||
48 | uint8_t reply[2]; | ||
49 | int msg_bytes; | ||
50 | int reply_bytes; | ||
51 | int ret; | 40 | int ret; |
52 | 41 | ||
53 | /* Set up the command byte */ | 42 | ret = (*algo_data->aux_ch)(adapter, mode, |
54 | if (mode & MODE_I2C_READ) | 43 | write_byte, read_byte); |
55 | msg[0] = AUX_I2C_READ << 4; | 44 | return ret; |
56 | else | ||
57 | msg[0] = AUX_I2C_WRITE << 4; | ||
58 | |||
59 | if (!(mode & MODE_I2C_STOP)) | ||
60 | msg[0] |= AUX_I2C_MOT << 4; | ||
61 | |||
62 | msg[1] = address >> 8; | ||
63 | msg[2] = address; | ||
64 | |||
65 | switch (mode) { | ||
66 | case MODE_I2C_WRITE: | ||
67 | msg[3] = 0; | ||
68 | msg[4] = write_byte; | ||
69 | msg_bytes = 5; | ||
70 | reply_bytes = 1; | ||
71 | break; | ||
72 | case MODE_I2C_READ: | ||
73 | msg[3] = 0; | ||
74 | msg_bytes = 4; | ||
75 | reply_bytes = 2; | ||
76 | break; | ||
77 | default: | ||
78 | msg_bytes = 3; | ||
79 | reply_bytes = 1; | ||
80 | break; | ||
81 | } | ||
82 | |||
83 | for (;;) { | ||
84 | ret = (*algo_data->aux_ch)(adapter, | ||
85 | msg, msg_bytes, | ||
86 | reply, reply_bytes); | ||
87 | if (ret < 0) { | ||
88 | DRM_DEBUG_KMS("aux_ch failed %d\n", ret); | ||
89 | return ret; | ||
90 | } | ||
91 | switch (reply[0] & AUX_I2C_REPLY_MASK) { | ||
92 | case AUX_I2C_REPLY_ACK: | ||
93 | if (mode == MODE_I2C_READ) { | ||
94 | *read_byte = reply[1]; | ||
95 | } | ||
96 | return reply_bytes - 1; | ||
97 | case AUX_I2C_REPLY_NACK: | ||
98 | DRM_DEBUG_KMS("aux_ch nack\n"); | ||
99 | return -EREMOTEIO; | ||
100 | case AUX_I2C_REPLY_DEFER: | ||
101 | DRM_DEBUG_KMS("aux_ch defer\n"); | ||
102 | udelay(100); | ||
103 | break; | ||
104 | default: | ||
105 | DRM_ERROR("aux_ch invalid reply 0x%02x\n", reply[0]); | ||
106 | return -EREMOTEIO; | ||
107 | } | ||
108 | } | ||
109 | } | 45 | } |
110 | 46 | ||
111 | /* | 47 | /* |
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index bfaf59b02bda..ff2f1042cb44 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c | |||
@@ -146,6 +146,7 @@ static struct drm_ioctl_desc drm_ioctls[] = { | |||
146 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW), | 146 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW), |
147 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW), | 147 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW), |
148 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), | 148 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), |
149 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW) | ||
149 | }; | 150 | }; |
150 | 151 | ||
151 | #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) | 152 | #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) |
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index cea665d86dd3..c39b26f1abed 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c | |||
@@ -123,18 +123,20 @@ static const u8 edid_header[] = { | |||
123 | */ | 123 | */ |
124 | static bool edid_is_valid(struct edid *edid) | 124 | static bool edid_is_valid(struct edid *edid) |
125 | { | 125 | { |
126 | int i; | 126 | int i, score = 0; |
127 | u8 csum = 0; | 127 | u8 csum = 0; |
128 | u8 *raw_edid = (u8 *)edid; | 128 | u8 *raw_edid = (u8 *)edid; |
129 | 129 | ||
130 | if (memcmp(edid->header, edid_header, sizeof(edid_header))) | 130 | for (i = 0; i < sizeof(edid_header); i++) |
131 | goto bad; | 131 | if (raw_edid[i] == edid_header[i]) |
132 | if (edid->version != 1) { | 132 | score++; |
133 | DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version); | 133 | |
134 | if (score == 8) ; | ||
135 | else if (score >= 6) { | ||
136 | DRM_DEBUG("Fixing EDID header, your hardware may be failing\n"); | ||
137 | memcpy(raw_edid, edid_header, sizeof(edid_header)); | ||
138 | } else | ||
134 | goto bad; | 139 | goto bad; |
135 | } | ||
136 | if (edid->revision > 4) | ||
137 | DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n"); | ||
138 | 140 | ||
139 | for (i = 0; i < EDID_LENGTH; i++) | 141 | for (i = 0; i < EDID_LENGTH; i++) |
140 | csum += raw_edid[i]; | 142 | csum += raw_edid[i]; |
@@ -143,6 +145,14 @@ static bool edid_is_valid(struct edid *edid) | |||
143 | goto bad; | 145 | goto bad; |
144 | } | 146 | } |
145 | 147 | ||
148 | if (edid->version != 1) { | ||
149 | DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version); | ||
150 | goto bad; | ||
151 | } | ||
152 | |||
153 | if (edid->revision > 4) | ||
154 | DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n"); | ||
155 | |||
146 | return 1; | 156 | return 1; |
147 | 157 | ||
148 | bad: | 158 | bad: |
@@ -481,16 +491,17 @@ static struct drm_display_mode drm_dmt_modes[] = { | |||
481 | 3048, 3536, 0, 1600, 1603, 1609, 1682, 0, | 491 | 3048, 3536, 0, 1600, 1603, 1609, 1682, 0, |
482 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, | 492 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
483 | }; | 493 | }; |
494 | static const int drm_num_dmt_modes = | ||
495 | sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode); | ||
484 | 496 | ||
485 | static struct drm_display_mode *drm_find_dmt(struct drm_device *dev, | 497 | static struct drm_display_mode *drm_find_dmt(struct drm_device *dev, |
486 | int hsize, int vsize, int fresh) | 498 | int hsize, int vsize, int fresh) |
487 | { | 499 | { |
488 | int i, count; | 500 | int i; |
489 | struct drm_display_mode *ptr, *mode; | 501 | struct drm_display_mode *ptr, *mode; |
490 | 502 | ||
491 | count = sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode); | ||
492 | mode = NULL; | 503 | mode = NULL; |
493 | for (i = 0; i < count; i++) { | 504 | for (i = 0; i < drm_num_dmt_modes; i++) { |
494 | ptr = &drm_dmt_modes[i]; | 505 | ptr = &drm_dmt_modes[i]; |
495 | if (hsize == ptr->hdisplay && | 506 | if (hsize == ptr->hdisplay && |
496 | vsize == ptr->vdisplay && | 507 | vsize == ptr->vdisplay && |
@@ -662,6 +673,12 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev, | |||
662 | return NULL; | 673 | return NULL; |
663 | } | 674 | } |
664 | 675 | ||
676 | /* Some EDIDs have bogus h/vtotal values */ | ||
677 | if (mode->hsync_end > mode->htotal) | ||
678 | mode->htotal = mode->hsync_end + 1; | ||
679 | if (mode->vsync_end > mode->vtotal) | ||
680 | mode->vtotal = mode->vsync_end + 1; | ||
681 | |||
665 | drm_mode_set_name(mode); | 682 | drm_mode_set_name(mode); |
666 | 683 | ||
667 | if (pt->misc & DRM_EDID_PT_INTERLACED) | 684 | if (pt->misc & DRM_EDID_PT_INTERLACED) |
@@ -828,8 +845,165 @@ static int add_standard_modes(struct drm_connector *connector, struct edid *edid | |||
828 | return modes; | 845 | return modes; |
829 | } | 846 | } |
830 | 847 | ||
848 | /* | ||
849 | * XXX fix this for: | ||
850 | * - GTF secondary curve formula | ||
851 | * - EDID 1.4 range offsets | ||
852 | * - CVT extended bits | ||
853 | */ | ||
854 | static bool | ||
855 | mode_in_range(struct drm_display_mode *mode, struct detailed_timing *timing) | ||
856 | { | ||
857 | struct detailed_data_monitor_range *range; | ||
858 | int hsync, vrefresh; | ||
859 | |||
860 | range = &timing->data.other_data.data.range; | ||
861 | |||
862 | hsync = drm_mode_hsync(mode); | ||
863 | vrefresh = drm_mode_vrefresh(mode); | ||
864 | |||
865 | if (hsync < range->min_hfreq_khz || hsync > range->max_hfreq_khz) | ||
866 | return false; | ||
867 | |||
868 | if (vrefresh < range->min_vfreq || vrefresh > range->max_vfreq) | ||
869 | return false; | ||
870 | |||
871 | if (range->pixel_clock_mhz && range->pixel_clock_mhz != 0xff) { | ||
872 | /* be forgiving since it's in units of 10MHz */ | ||
873 | int max_clock = range->pixel_clock_mhz * 10 + 9; | ||
874 | max_clock *= 1000; | ||
875 | if (mode->clock > max_clock) | ||
876 | return false; | ||
877 | } | ||
878 | |||
879 | return true; | ||
880 | } | ||
881 | |||
882 | /* | ||
883 | * XXX If drm_dmt_modes ever regrows the CVT-R modes (and it will) this will | ||
884 | * need to account for them. | ||
885 | */ | ||
886 | static int drm_gtf_modes_for_range(struct drm_connector *connector, | ||
887 | struct detailed_timing *timing) | ||
888 | { | ||
889 | int i, modes = 0; | ||
890 | struct drm_display_mode *newmode; | ||
891 | struct drm_device *dev = connector->dev; | ||
892 | |||
893 | for (i = 0; i < drm_num_dmt_modes; i++) { | ||
894 | if (mode_in_range(drm_dmt_modes + i, timing)) { | ||
895 | newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]); | ||
896 | if (newmode) { | ||
897 | drm_mode_probed_add(connector, newmode); | ||
898 | modes++; | ||
899 | } | ||
900 | } | ||
901 | } | ||
902 | |||
903 | return modes; | ||
904 | } | ||
905 | |||
906 | static int drm_cvt_modes(struct drm_connector *connector, | ||
907 | struct detailed_timing *timing) | ||
908 | { | ||
909 | int i, j, modes = 0; | ||
910 | struct drm_display_mode *newmode; | ||
911 | struct drm_device *dev = connector->dev; | ||
912 | struct cvt_timing *cvt; | ||
913 | const int rates[] = { 60, 85, 75, 60, 50 }; | ||
914 | |||
915 | for (i = 0; i < 4; i++) { | ||
916 | int width, height; | ||
917 | cvt = &(timing->data.other_data.data.cvt[i]); | ||
918 | |||
919 | height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 8) + 1) * 2; | ||
920 | switch (cvt->code[1] & 0xc0) { | ||
921 | case 0x00: | ||
922 | width = height * 4 / 3; | ||
923 | break; | ||
924 | case 0x40: | ||
925 | width = height * 16 / 9; | ||
926 | break; | ||
927 | case 0x80: | ||
928 | width = height * 16 / 10; | ||
929 | break; | ||
930 | case 0xc0: | ||
931 | width = height * 15 / 9; | ||
932 | break; | ||
933 | } | ||
934 | |||
935 | for (j = 1; j < 5; j++) { | ||
936 | if (cvt->code[2] & (1 << j)) { | ||
937 | newmode = drm_cvt_mode(dev, width, height, | ||
938 | rates[j], j == 0, | ||
939 | false, false); | ||
940 | if (newmode) { | ||
941 | drm_mode_probed_add(connector, newmode); | ||
942 | modes++; | ||
943 | } | ||
944 | } | ||
945 | } | ||
946 | } | ||
947 | |||
948 | return modes; | ||
949 | } | ||
950 | |||
951 | static int add_detailed_modes(struct drm_connector *connector, | ||
952 | struct detailed_timing *timing, | ||
953 | struct edid *edid, u32 quirks, int preferred) | ||
954 | { | ||
955 | int i, modes = 0; | ||
956 | struct detailed_non_pixel *data = &timing->data.other_data; | ||
957 | int timing_level = standard_timing_level(edid); | ||
958 | int gtf = (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF); | ||
959 | struct drm_display_mode *newmode; | ||
960 | struct drm_device *dev = connector->dev; | ||
961 | |||
962 | if (timing->pixel_clock) { | ||
963 | newmode = drm_mode_detailed(dev, edid, timing, quirks); | ||
964 | if (!newmode) | ||
965 | return 0; | ||
966 | |||
967 | if (preferred) | ||
968 | newmode->type |= DRM_MODE_TYPE_PREFERRED; | ||
969 | |||
970 | drm_mode_probed_add(connector, newmode); | ||
971 | return 1; | ||
972 | } | ||
973 | |||
974 | /* other timing types */ | ||
975 | switch (data->type) { | ||
976 | case EDID_DETAIL_MONITOR_RANGE: | ||
977 | if (gtf) | ||
978 | modes += drm_gtf_modes_for_range(connector, timing); | ||
979 | break; | ||
980 | case EDID_DETAIL_STD_MODES: | ||
981 | /* Six modes per detailed section */ | ||
982 | for (i = 0; i < 6; i++) { | ||
983 | struct std_timing *std; | ||
984 | struct drm_display_mode *newmode; | ||
985 | |||
986 | std = &data->data.timings[i]; | ||
987 | newmode = drm_mode_std(dev, std, edid->revision, | ||
988 | timing_level); | ||
989 | if (newmode) { | ||
990 | drm_mode_probed_add(connector, newmode); | ||
991 | modes++; | ||
992 | } | ||
993 | } | ||
994 | break; | ||
995 | case EDID_DETAIL_CVT_3BYTE: | ||
996 | modes += drm_cvt_modes(connector, timing); | ||
997 | break; | ||
998 | default: | ||
999 | break; | ||
1000 | } | ||
1001 | |||
1002 | return modes; | ||
1003 | } | ||
1004 | |||
831 | /** | 1005 | /** |
832 | * add_detailed_modes - get detailed mode info from EDID data | 1006 | * add_detailed_info - get detailed mode info from EDID data |
833 | * @connector: attached connector | 1007 | * @connector: attached connector |
834 | * @edid: EDID block to scan | 1008 | * @edid: EDID block to scan |
835 | * @quirks: quirks to apply | 1009 | * @quirks: quirks to apply |
@@ -840,67 +1014,24 @@ static int add_standard_modes(struct drm_connector *connector, struct edid *edid | |||
840 | static int add_detailed_info(struct drm_connector *connector, | 1014 | static int add_detailed_info(struct drm_connector *connector, |
841 | struct edid *edid, u32 quirks) | 1015 | struct edid *edid, u32 quirks) |
842 | { | 1016 | { |
843 | struct drm_device *dev = connector->dev; | 1017 | int i, modes = 0; |
844 | int i, j, modes = 0; | ||
845 | int timing_level; | ||
846 | |||
847 | timing_level = standard_timing_level(edid); | ||
848 | 1018 | ||
849 | for (i = 0; i < EDID_DETAILED_TIMINGS; i++) { | 1019 | for (i = 0; i < EDID_DETAILED_TIMINGS; i++) { |
850 | struct detailed_timing *timing = &edid->detailed_timings[i]; | 1020 | struct detailed_timing *timing = &edid->detailed_timings[i]; |
851 | struct detailed_non_pixel *data = &timing->data.other_data; | 1021 | int preferred = (i == 0) && (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING); |
852 | struct drm_display_mode *newmode; | ||
853 | 1022 | ||
854 | /* X server check is version 1.1 or higher */ | 1023 | /* In 1.0, only timings are allowed */ |
855 | if (edid->version == 1 && edid->revision >= 1 && | 1024 | if (!timing->pixel_clock && edid->version == 1 && |
856 | !timing->pixel_clock) { | 1025 | edid->revision == 0) |
857 | /* Other timing or info */ | 1026 | continue; |
858 | switch (data->type) { | ||
859 | case EDID_DETAIL_MONITOR_SERIAL: | ||
860 | break; | ||
861 | case EDID_DETAIL_MONITOR_STRING: | ||
862 | break; | ||
863 | case EDID_DETAIL_MONITOR_RANGE: | ||
864 | /* Get monitor range data */ | ||
865 | break; | ||
866 | case EDID_DETAIL_MONITOR_NAME: | ||
867 | break; | ||
868 | case EDID_DETAIL_MONITOR_CPDATA: | ||
869 | break; | ||
870 | case EDID_DETAIL_STD_MODES: | ||
871 | for (j = 0; j < 6; i++) { | ||
872 | struct std_timing *std; | ||
873 | struct drm_display_mode *newmode; | ||
874 | |||
875 | std = &data->data.timings[j]; | ||
876 | newmode = drm_mode_std(dev, std, | ||
877 | edid->revision, | ||
878 | timing_level); | ||
879 | if (newmode) { | ||
880 | drm_mode_probed_add(connector, newmode); | ||
881 | modes++; | ||
882 | } | ||
883 | } | ||
884 | break; | ||
885 | default: | ||
886 | break; | ||
887 | } | ||
888 | } else { | ||
889 | newmode = drm_mode_detailed(dev, edid, timing, quirks); | ||
890 | if (!newmode) | ||
891 | continue; | ||
892 | |||
893 | /* First detailed mode is preferred */ | ||
894 | if (i == 0 && (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING)) | ||
895 | newmode->type |= DRM_MODE_TYPE_PREFERRED; | ||
896 | drm_mode_probed_add(connector, newmode); | ||
897 | 1027 | ||
898 | modes++; | 1028 | modes += add_detailed_modes(connector, timing, edid, quirks, |
899 | } | 1029 | preferred); |
900 | } | 1030 | } |
901 | 1031 | ||
902 | return modes; | 1032 | return modes; |
903 | } | 1033 | } |
1034 | |||
904 | /** | 1035 | /** |
905 | * add_detailed_mode_eedid - get detailed mode info from addtional timing | 1036 | * add_detailed_mode_eedid - get detailed mode info from addtional timing |
906 | * EDID block | 1037 | * EDID block |
@@ -914,12 +1045,9 @@ static int add_detailed_info(struct drm_connector *connector, | |||
914 | static int add_detailed_info_eedid(struct drm_connector *connector, | 1045 | static int add_detailed_info_eedid(struct drm_connector *connector, |
915 | struct edid *edid, u32 quirks) | 1046 | struct edid *edid, u32 quirks) |
916 | { | 1047 | { |
917 | struct drm_device *dev = connector->dev; | 1048 | int i, modes = 0; |
918 | int i, j, modes = 0; | ||
919 | char *edid_ext = NULL; | 1049 | char *edid_ext = NULL; |
920 | struct detailed_timing *timing; | 1050 | struct detailed_timing *timing; |
921 | struct detailed_non_pixel *data; | ||
922 | struct drm_display_mode *newmode; | ||
923 | int edid_ext_num; | 1051 | int edid_ext_num; |
924 | int start_offset, end_offset; | 1052 | int start_offset, end_offset; |
925 | int timing_level; | 1053 | int timing_level; |
@@ -970,51 +1098,7 @@ static int add_detailed_info_eedid(struct drm_connector *connector, | |||
970 | for (i = start_offset; i < end_offset; | 1098 | for (i = start_offset; i < end_offset; |
971 | i += sizeof(struct detailed_timing)) { | 1099 | i += sizeof(struct detailed_timing)) { |
972 | timing = (struct detailed_timing *)(edid_ext + i); | 1100 | timing = (struct detailed_timing *)(edid_ext + i); |
973 | data = &timing->data.other_data; | 1101 | modes += add_detailed_modes(connector, timing, edid, quirks, 0); |
974 | /* Detailed mode timing */ | ||
975 | if (timing->pixel_clock) { | ||
976 | newmode = drm_mode_detailed(dev, edid, timing, quirks); | ||
977 | if (!newmode) | ||
978 | continue; | ||
979 | |||
980 | drm_mode_probed_add(connector, newmode); | ||
981 | |||
982 | modes++; | ||
983 | continue; | ||
984 | } | ||
985 | |||
986 | /* Other timing or info */ | ||
987 | switch (data->type) { | ||
988 | case EDID_DETAIL_MONITOR_SERIAL: | ||
989 | break; | ||
990 | case EDID_DETAIL_MONITOR_STRING: | ||
991 | break; | ||
992 | case EDID_DETAIL_MONITOR_RANGE: | ||
993 | /* Get monitor range data */ | ||
994 | break; | ||
995 | case EDID_DETAIL_MONITOR_NAME: | ||
996 | break; | ||
997 | case EDID_DETAIL_MONITOR_CPDATA: | ||
998 | break; | ||
999 | case EDID_DETAIL_STD_MODES: | ||
1000 | /* Five modes per detailed section */ | ||
1001 | for (j = 0; j < 5; i++) { | ||
1002 | struct std_timing *std; | ||
1003 | struct drm_display_mode *newmode; | ||
1004 | |||
1005 | std = &data->data.timings[j]; | ||
1006 | newmode = drm_mode_std(dev, std, | ||
1007 | edid->revision, | ||
1008 | timing_level); | ||
1009 | if (newmode) { | ||
1010 | drm_mode_probed_add(connector, newmode); | ||
1011 | modes++; | ||
1012 | } | ||
1013 | } | ||
1014 | break; | ||
1015 | default: | ||
1016 | break; | ||
1017 | } | ||
1018 | } | 1102 | } |
1019 | 1103 | ||
1020 | return modes; | 1104 | return modes; |
@@ -1060,19 +1144,19 @@ static int drm_ddc_read_edid(struct drm_connector *connector, | |||
1060 | struct i2c_adapter *adapter, | 1144 | struct i2c_adapter *adapter, |
1061 | char *buf, int len) | 1145 | char *buf, int len) |
1062 | { | 1146 | { |
1063 | int ret; | 1147 | int i; |
1064 | 1148 | ||
1065 | ret = drm_do_probe_ddc_edid(adapter, buf, len); | 1149 | for (i = 0; i < 4; i++) { |
1066 | if (ret != 0) { | 1150 | if (drm_do_probe_ddc_edid(adapter, buf, len)) |
1067 | goto end; | 1151 | return -1; |
1068 | } | 1152 | if (edid_is_valid((struct edid *)buf)) |
1069 | if (!edid_is_valid((struct edid *)buf)) { | 1153 | return 0; |
1070 | dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n", | ||
1071 | drm_get_connector_name(connector)); | ||
1072 | ret = -1; | ||
1073 | } | 1154 | } |
1074 | end: | 1155 | |
1075 | return ret; | 1156 | /* repeated checksum failures; warn, but carry on */ |
1157 | dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n", | ||
1158 | drm_get_connector_name(connector)); | ||
1159 | return -1; | ||
1076 | } | 1160 | } |
1077 | 1161 | ||
1078 | /** | 1162 | /** |
@@ -1290,6 +1374,8 @@ int drm_add_modes_noedid(struct drm_connector *connector, | |||
1290 | ptr->vdisplay > vdisplay) | 1374 | ptr->vdisplay > vdisplay) |
1291 | continue; | 1375 | continue; |
1292 | } | 1376 | } |
1377 | if (drm_mode_vrefresh(ptr) > 61) | ||
1378 | continue; | ||
1293 | mode = drm_mode_duplicate(dev, ptr); | 1379 | mode = drm_mode_duplicate(dev, ptr); |
1294 | if (mode) { | 1380 | if (mode) { |
1295 | drm_mode_probed_add(connector, mode); | 1381 | drm_mode_probed_add(connector, mode); |
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index dc8e374a0b55..1b49fa055f4f 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c | |||
@@ -373,11 +373,9 @@ static void drm_fb_helper_off(struct fb_info *info, int dpms_mode) | |||
373 | mutex_unlock(&dev->mode_config.mutex); | 373 | mutex_unlock(&dev->mode_config.mutex); |
374 | } | 374 | } |
375 | } | 375 | } |
376 | if (dpms_mode == DRM_MODE_DPMS_OFF) { | 376 | mutex_lock(&dev->mode_config.mutex); |
377 | mutex_lock(&dev->mode_config.mutex); | 377 | crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); |
378 | crtc_funcs->dpms(crtc, dpms_mode); | 378 | mutex_unlock(&dev->mode_config.mutex); |
379 | mutex_unlock(&dev->mode_config.mutex); | ||
380 | } | ||
381 | } | 379 | } |
382 | } | 380 | } |
383 | } | 381 | } |
@@ -385,18 +383,23 @@ static void drm_fb_helper_off(struct fb_info *info, int dpms_mode) | |||
385 | int drm_fb_helper_blank(int blank, struct fb_info *info) | 383 | int drm_fb_helper_blank(int blank, struct fb_info *info) |
386 | { | 384 | { |
387 | switch (blank) { | 385 | switch (blank) { |
386 | /* Display: On; HSync: On, VSync: On */ | ||
388 | case FB_BLANK_UNBLANK: | 387 | case FB_BLANK_UNBLANK: |
389 | drm_fb_helper_on(info); | 388 | drm_fb_helper_on(info); |
390 | break; | 389 | break; |
390 | /* Display: Off; HSync: On, VSync: On */ | ||
391 | case FB_BLANK_NORMAL: | 391 | case FB_BLANK_NORMAL: |
392 | drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY); | 392 | drm_fb_helper_off(info, DRM_MODE_DPMS_ON); |
393 | break; | 393 | break; |
394 | /* Display: Off; HSync: Off, VSync: On */ | ||
394 | case FB_BLANK_HSYNC_SUSPEND: | 395 | case FB_BLANK_HSYNC_SUSPEND: |
395 | drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY); | 396 | drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY); |
396 | break; | 397 | break; |
398 | /* Display: Off; HSync: On, VSync: Off */ | ||
397 | case FB_BLANK_VSYNC_SUSPEND: | 399 | case FB_BLANK_VSYNC_SUSPEND: |
398 | drm_fb_helper_off(info, DRM_MODE_DPMS_SUSPEND); | 400 | drm_fb_helper_off(info, DRM_MODE_DPMS_SUSPEND); |
399 | break; | 401 | break; |
402 | /* Display: Off; HSync: Off, VSync: Off */ | ||
400 | case FB_BLANK_POWERDOWN: | 403 | case FB_BLANK_POWERDOWN: |
401 | drm_fb_helper_off(info, DRM_MODE_DPMS_OFF); | 404 | drm_fb_helper_off(info, DRM_MODE_DPMS_OFF); |
402 | break; | 405 | break; |
@@ -599,7 +602,7 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var, | |||
599 | struct drm_framebuffer *fb = fb_helper->fb; | 602 | struct drm_framebuffer *fb = fb_helper->fb; |
600 | int depth; | 603 | int depth; |
601 | 604 | ||
602 | if (var->pixclock == -1 || !var->pixclock) | 605 | if (var->pixclock != 0) |
603 | return -EINVAL; | 606 | return -EINVAL; |
604 | 607 | ||
605 | /* Need to resize the fb object !!! */ | 608 | /* Need to resize the fb object !!! */ |
@@ -691,7 +694,7 @@ int drm_fb_helper_set_par(struct fb_info *info) | |||
691 | int ret; | 694 | int ret; |
692 | int i; | 695 | int i; |
693 | 696 | ||
694 | if (var->pixclock != -1) { | 697 | if (var->pixclock != 0) { |
695 | DRM_ERROR("PIXEL CLCOK SET\n"); | 698 | DRM_ERROR("PIXEL CLCOK SET\n"); |
696 | return -EINVAL; | 699 | return -EINVAL; |
697 | } | 700 | } |
@@ -904,9 +907,14 @@ int drm_fb_helper_single_fb_probe(struct drm_device *dev, | |||
904 | fb_helper->fb = fb; | 907 | fb_helper->fb = fb; |
905 | 908 | ||
906 | if (new_fb) { | 909 | if (new_fb) { |
907 | info->var.pixclock = -1; | 910 | info->var.pixclock = 0; |
908 | if (register_framebuffer(info) < 0) | 911 | ret = fb_alloc_cmap(&info->cmap, modeset->crtc->gamma_size, 0); |
912 | if (ret) | ||
913 | return ret; | ||
914 | if (register_framebuffer(info) < 0) { | ||
915 | fb_dealloc_cmap(&info->cmap); | ||
909 | return -EINVAL; | 916 | return -EINVAL; |
917 | } | ||
910 | } else { | 918 | } else { |
911 | drm_fb_helper_set_par(info); | 919 | drm_fb_helper_set_par(info); |
912 | } | 920 | } |
@@ -936,6 +944,7 @@ void drm_fb_helper_free(struct drm_fb_helper *helper) | |||
936 | unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op); | 944 | unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op); |
937 | } | 945 | } |
938 | drm_fb_helper_crtc_free(helper); | 946 | drm_fb_helper_crtc_free(helper); |
947 | fb_dealloc_cmap(&helper->fb->fbdev->cmap); | ||
939 | } | 948 | } |
940 | EXPORT_SYMBOL(drm_fb_helper_free); | 949 | EXPORT_SYMBOL(drm_fb_helper_free); |
941 | 950 | ||
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index 8ac7fbf6b2b7..08d14df3bb42 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c | |||
@@ -300,6 +300,18 @@ static int drm_open_helper(struct inode *inode, struct file *filp, | |||
300 | goto out_free; | 300 | goto out_free; |
301 | } | 301 | } |
302 | } | 302 | } |
303 | mutex_lock(&dev->struct_mutex); | ||
304 | if (dev->driver->master_set) { | ||
305 | ret = dev->driver->master_set(dev, priv, true); | ||
306 | if (ret) { | ||
307 | /* drop both references if this fails */ | ||
308 | drm_master_put(&priv->minor->master); | ||
309 | drm_master_put(&priv->master); | ||
310 | mutex_unlock(&dev->struct_mutex); | ||
311 | goto out_free; | ||
312 | } | ||
313 | } | ||
314 | mutex_unlock(&dev->struct_mutex); | ||
303 | } else { | 315 | } else { |
304 | /* get a reference to the master */ | 316 | /* get a reference to the master */ |
305 | priv->master = drm_master_get(priv->minor->master); | 317 | priv->master = drm_master_get(priv->minor->master); |
@@ -533,6 +545,8 @@ int drm_release(struct inode *inode, struct file *filp) | |||
533 | 545 | ||
534 | if (file_priv->minor->master == file_priv->master) { | 546 | if (file_priv->minor->master == file_priv->master) { |
535 | /* drop the reference held my the minor */ | 547 | /* drop the reference held my the minor */ |
548 | if (dev->driver->master_drop) | ||
549 | dev->driver->master_drop(dev, file_priv, true); | ||
536 | drm_master_put(&file_priv->minor->master); | 550 | drm_master_put(&file_priv->minor->master); |
537 | } | 551 | } |
538 | } | 552 | } |
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 80391995bdec..e9dbb481c469 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c | |||
@@ -552,7 +552,7 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma) | |||
552 | vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND; | 552 | vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND; |
553 | vma->vm_ops = obj->dev->driver->gem_vm_ops; | 553 | vma->vm_ops = obj->dev->driver->gem_vm_ops; |
554 | vma->vm_private_data = map->handle; | 554 | vma->vm_private_data = map->handle; |
555 | vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); | 555 | vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); |
556 | 556 | ||
557 | /* Take a ref for this mapping of the object, so that the fault | 557 | /* Take a ref for this mapping of the object, so that the fault |
558 | * handler can dereference the mmap offset's pointer to the object. | 558 | * handler can dereference the mmap offset's pointer to the object. |
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index c861d80fd779..1f0d717dbad6 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c | |||
@@ -103,6 +103,11 @@ static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic) | |||
103 | return child; | 103 | return child; |
104 | } | 104 | } |
105 | 105 | ||
106 | /* drm_mm_pre_get() - pre allocate drm_mm_node structure | ||
107 | * drm_mm: memory manager struct we are pre-allocating for | ||
108 | * | ||
109 | * Returns 0 on success or -ENOMEM if allocation fails. | ||
110 | */ | ||
106 | int drm_mm_pre_get(struct drm_mm *mm) | 111 | int drm_mm_pre_get(struct drm_mm *mm) |
107 | { | 112 | { |
108 | struct drm_mm_node *node; | 113 | struct drm_mm_node *node; |
@@ -253,12 +258,14 @@ void drm_mm_put_block(struct drm_mm_node *cur) | |||
253 | prev_node->size += next_node->size; | 258 | prev_node->size += next_node->size; |
254 | list_del(&next_node->ml_entry); | 259 | list_del(&next_node->ml_entry); |
255 | list_del(&next_node->fl_entry); | 260 | list_del(&next_node->fl_entry); |
261 | spin_lock(&mm->unused_lock); | ||
256 | if (mm->num_unused < MM_UNUSED_TARGET) { | 262 | if (mm->num_unused < MM_UNUSED_TARGET) { |
257 | list_add(&next_node->fl_entry, | 263 | list_add(&next_node->fl_entry, |
258 | &mm->unused_nodes); | 264 | &mm->unused_nodes); |
259 | ++mm->num_unused; | 265 | ++mm->num_unused; |
260 | } else | 266 | } else |
261 | kfree(next_node); | 267 | kfree(next_node); |
268 | spin_unlock(&mm->unused_lock); | ||
262 | } else { | 269 | } else { |
263 | next_node->size += cur->size; | 270 | next_node->size += cur->size; |
264 | next_node->start = cur->start; | 271 | next_node->start = cur->start; |
@@ -271,11 +278,13 @@ void drm_mm_put_block(struct drm_mm_node *cur) | |||
271 | list_add(&cur->fl_entry, &mm->fl_entry); | 278 | list_add(&cur->fl_entry, &mm->fl_entry); |
272 | } else { | 279 | } else { |
273 | list_del(&cur->ml_entry); | 280 | list_del(&cur->ml_entry); |
281 | spin_lock(&mm->unused_lock); | ||
274 | if (mm->num_unused < MM_UNUSED_TARGET) { | 282 | if (mm->num_unused < MM_UNUSED_TARGET) { |
275 | list_add(&cur->fl_entry, &mm->unused_nodes); | 283 | list_add(&cur->fl_entry, &mm->unused_nodes); |
276 | ++mm->num_unused; | 284 | ++mm->num_unused; |
277 | } else | 285 | } else |
278 | kfree(cur); | 286 | kfree(cur); |
287 | spin_unlock(&mm->unused_lock); | ||
279 | } | 288 | } |
280 | } | 289 | } |
281 | 290 | ||
@@ -386,7 +395,7 @@ int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm) | |||
386 | else | 395 | else |
387 | total_used += entry->size; | 396 | total_used += entry->size; |
388 | } | 397 | } |
389 | seq_printf(m, "total: %d, used %d free %d\n", total, total_free, total_used); | 398 | seq_printf(m, "total: %d, used %d free %d\n", total, total_used, total_free); |
390 | return 0; | 399 | return 0; |
391 | } | 400 | } |
392 | EXPORT_SYMBOL(drm_mm_dump_table); | 401 | EXPORT_SYMBOL(drm_mm_dump_table); |
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index 51f677215f1d..6d81a02463a3 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c | |||
@@ -553,6 +553,32 @@ int drm_mode_height(struct drm_display_mode *mode) | |||
553 | } | 553 | } |
554 | EXPORT_SYMBOL(drm_mode_height); | 554 | EXPORT_SYMBOL(drm_mode_height); |
555 | 555 | ||
556 | /** drm_mode_hsync - get the hsync of a mode | ||
557 | * @mode: mode | ||
558 | * | ||
559 | * LOCKING: | ||
560 | * None. | ||
561 | * | ||
562 | * Return @modes's hsync rate in kHz, rounded to the nearest int. | ||
563 | */ | ||
564 | int drm_mode_hsync(struct drm_display_mode *mode) | ||
565 | { | ||
566 | unsigned int calc_val; | ||
567 | |||
568 | if (mode->hsync) | ||
569 | return mode->hsync; | ||
570 | |||
571 | if (mode->htotal < 0) | ||
572 | return 0; | ||
573 | |||
574 | calc_val = (mode->clock * 1000) / mode->htotal; /* hsync in Hz */ | ||
575 | calc_val += 500; /* round to 1000Hz */ | ||
576 | calc_val /= 1000; /* truncate to kHz */ | ||
577 | |||
578 | return calc_val; | ||
579 | } | ||
580 | EXPORT_SYMBOL(drm_mode_hsync); | ||
581 | |||
556 | /** | 582 | /** |
557 | * drm_mode_vrefresh - get the vrefresh of a mode | 583 | * drm_mode_vrefresh - get the vrefresh of a mode |
558 | * @mode: mode | 584 | * @mode: mode |
@@ -560,7 +586,7 @@ EXPORT_SYMBOL(drm_mode_height); | |||
560 | * LOCKING: | 586 | * LOCKING: |
561 | * None. | 587 | * None. |
562 | * | 588 | * |
563 | * Return @mode's vrefresh rate or calculate it if necessary. | 589 | * Return @mode's vrefresh rate in Hz or calculate it if necessary. |
564 | * | 590 | * |
565 | * FIXME: why is this needed? shouldn't vrefresh be set already? | 591 | * FIXME: why is this needed? shouldn't vrefresh be set already? |
566 | * | 592 | * |
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c index adb864dfef3e..ad73e141afdb 100644 --- a/drivers/gpu/drm/drm_stub.c +++ b/drivers/gpu/drm/drm_stub.c | |||
@@ -128,6 +128,7 @@ struct drm_master *drm_master_get(struct drm_master *master) | |||
128 | kref_get(&master->refcount); | 128 | kref_get(&master->refcount); |
129 | return master; | 129 | return master; |
130 | } | 130 | } |
131 | EXPORT_SYMBOL(drm_master_get); | ||
131 | 132 | ||
132 | static void drm_master_destroy(struct kref *kref) | 133 | static void drm_master_destroy(struct kref *kref) |
133 | { | 134 | { |
@@ -170,10 +171,13 @@ void drm_master_put(struct drm_master **master) | |||
170 | kref_put(&(*master)->refcount, drm_master_destroy); | 171 | kref_put(&(*master)->refcount, drm_master_destroy); |
171 | *master = NULL; | 172 | *master = NULL; |
172 | } | 173 | } |
174 | EXPORT_SYMBOL(drm_master_put); | ||
173 | 175 | ||
174 | int drm_setmaster_ioctl(struct drm_device *dev, void *data, | 176 | int drm_setmaster_ioctl(struct drm_device *dev, void *data, |
175 | struct drm_file *file_priv) | 177 | struct drm_file *file_priv) |
176 | { | 178 | { |
179 | int ret = 0; | ||
180 | |||
177 | if (file_priv->is_master) | 181 | if (file_priv->is_master) |
178 | return 0; | 182 | return 0; |
179 | 183 | ||
@@ -188,6 +192,13 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data, | |||
188 | mutex_lock(&dev->struct_mutex); | 192 | mutex_lock(&dev->struct_mutex); |
189 | file_priv->minor->master = drm_master_get(file_priv->master); | 193 | file_priv->minor->master = drm_master_get(file_priv->master); |
190 | file_priv->is_master = 1; | 194 | file_priv->is_master = 1; |
195 | if (dev->driver->master_set) { | ||
196 | ret = dev->driver->master_set(dev, file_priv, false); | ||
197 | if (unlikely(ret != 0)) { | ||
198 | file_priv->is_master = 0; | ||
199 | drm_master_put(&file_priv->minor->master); | ||
200 | } | ||
201 | } | ||
191 | mutex_unlock(&dev->struct_mutex); | 202 | mutex_unlock(&dev->struct_mutex); |
192 | } | 203 | } |
193 | 204 | ||
@@ -204,6 +215,8 @@ int drm_dropmaster_ioctl(struct drm_device *dev, void *data, | |||
204 | return -EINVAL; | 215 | return -EINVAL; |
205 | 216 | ||
206 | mutex_lock(&dev->struct_mutex); | 217 | mutex_lock(&dev->struct_mutex); |
218 | if (dev->driver->master_drop) | ||
219 | dev->driver->master_drop(dev, file_priv, false); | ||
207 | drm_master_put(&file_priv->minor->master); | 220 | drm_master_put(&file_priv->minor->master); |
208 | file_priv->is_master = 0; | 221 | file_priv->is_master = 0; |
209 | mutex_unlock(&dev->struct_mutex); | 222 | mutex_unlock(&dev->struct_mutex); |
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 87b21996cd6a..9929f84ec3e1 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile | |||
@@ -15,7 +15,6 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \ | |||
15 | intel_lvds.o \ | 15 | intel_lvds.o \ |
16 | intel_bios.o \ | 16 | intel_bios.o \ |
17 | intel_dp.o \ | 17 | intel_dp.o \ |
18 | intel_dp_i2c.o \ | ||
19 | intel_hdmi.o \ | 18 | intel_hdmi.o \ |
20 | intel_sdvo.o \ | 19 | intel_sdvo.o \ |
21 | intel_modes.o \ | 20 | intel_modes.o \ |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 1d6171087298..fbecac72f5bb 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -305,6 +305,7 @@ typedef struct drm_i915_private { | |||
305 | u32 saveVBLANK_A; | 305 | u32 saveVBLANK_A; |
306 | u32 saveVSYNC_A; | 306 | u32 saveVSYNC_A; |
307 | u32 saveBCLRPAT_A; | 307 | u32 saveBCLRPAT_A; |
308 | u32 saveTRANSACONF; | ||
308 | u32 saveTRANS_HTOTAL_A; | 309 | u32 saveTRANS_HTOTAL_A; |
309 | u32 saveTRANS_HBLANK_A; | 310 | u32 saveTRANS_HBLANK_A; |
310 | u32 saveTRANS_HSYNC_A; | 311 | u32 saveTRANS_HSYNC_A; |
@@ -335,6 +336,7 @@ typedef struct drm_i915_private { | |||
335 | u32 saveVBLANK_B; | 336 | u32 saveVBLANK_B; |
336 | u32 saveVSYNC_B; | 337 | u32 saveVSYNC_B; |
337 | u32 saveBCLRPAT_B; | 338 | u32 saveBCLRPAT_B; |
339 | u32 saveTRANSBCONF; | ||
338 | u32 saveTRANS_HTOTAL_B; | 340 | u32 saveTRANS_HTOTAL_B; |
339 | u32 saveTRANS_HBLANK_B; | 341 | u32 saveTRANS_HBLANK_B; |
340 | u32 saveTRANS_HSYNC_B; | 342 | u32 saveTRANS_HSYNC_B; |
@@ -421,6 +423,16 @@ typedef struct drm_i915_private { | |||
421 | u32 savePFB_WIN_SZ; | 423 | u32 savePFB_WIN_SZ; |
422 | u32 savePFA_WIN_POS; | 424 | u32 savePFA_WIN_POS; |
423 | u32 savePFB_WIN_POS; | 425 | u32 savePFB_WIN_POS; |
426 | u32 savePCH_DREF_CONTROL; | ||
427 | u32 saveDISP_ARB_CTL; | ||
428 | u32 savePIPEA_DATA_M1; | ||
429 | u32 savePIPEA_DATA_N1; | ||
430 | u32 savePIPEA_LINK_M1; | ||
431 | u32 savePIPEA_LINK_N1; | ||
432 | u32 savePIPEB_DATA_M1; | ||
433 | u32 savePIPEB_DATA_N1; | ||
434 | u32 savePIPEB_LINK_M1; | ||
435 | u32 savePIPEB_LINK_N1; | ||
424 | 436 | ||
425 | struct { | 437 | struct { |
426 | struct drm_mm gtt_space; | 438 | struct drm_mm gtt_space; |
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index a1345d78e138..85f4c5de97e2 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -273,10 +273,15 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev) | |||
273 | { | 273 | { |
274 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 274 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
275 | int ret = IRQ_NONE; | 275 | int ret = IRQ_NONE; |
276 | u32 de_iir, gt_iir, pch_iir; | 276 | u32 de_iir, gt_iir, de_ier, pch_iir; |
277 | u32 new_de_iir, new_gt_iir, new_pch_iir; | 277 | u32 new_de_iir, new_gt_iir, new_pch_iir; |
278 | struct drm_i915_master_private *master_priv; | 278 | struct drm_i915_master_private *master_priv; |
279 | 279 | ||
280 | /* disable master interrupt before clearing iir */ | ||
281 | de_ier = I915_READ(DEIER); | ||
282 | I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); | ||
283 | (void)I915_READ(DEIER); | ||
284 | |||
280 | de_iir = I915_READ(DEIIR); | 285 | de_iir = I915_READ(DEIIR); |
281 | gt_iir = I915_READ(GTIIR); | 286 | gt_iir = I915_READ(GTIIR); |
282 | pch_iir = I915_READ(SDEIIR); | 287 | pch_iir = I915_READ(SDEIIR); |
@@ -324,6 +329,9 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev) | |||
324 | pch_iir = new_pch_iir; | 329 | pch_iir = new_pch_iir; |
325 | } | 330 | } |
326 | 331 | ||
332 | I915_WRITE(DEIER, de_ier); | ||
333 | (void)I915_READ(DEIER); | ||
334 | |||
327 | return ret; | 335 | return ret; |
328 | } | 336 | } |
329 | 337 | ||
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index 00f6d97c7cc5..d5ebb00a9d49 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c | |||
@@ -239,6 +239,11 @@ static void i915_save_modeset_reg(struct drm_device *dev) | |||
239 | if (drm_core_check_feature(dev, DRIVER_MODESET)) | 239 | if (drm_core_check_feature(dev, DRIVER_MODESET)) |
240 | return; | 240 | return; |
241 | 241 | ||
242 | if (IS_IRONLAKE(dev)) { | ||
243 | dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL); | ||
244 | dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL); | ||
245 | } | ||
246 | |||
242 | /* Pipe & plane A info */ | 247 | /* Pipe & plane A info */ |
243 | dev_priv->savePIPEACONF = I915_READ(PIPEACONF); | 248 | dev_priv->savePIPEACONF = I915_READ(PIPEACONF); |
244 | dev_priv->savePIPEASRC = I915_READ(PIPEASRC); | 249 | dev_priv->savePIPEASRC = I915_READ(PIPEASRC); |
@@ -263,6 +268,11 @@ static void i915_save_modeset_reg(struct drm_device *dev) | |||
263 | dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A); | 268 | dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A); |
264 | 269 | ||
265 | if (IS_IRONLAKE(dev)) { | 270 | if (IS_IRONLAKE(dev)) { |
271 | dev_priv->savePIPEA_DATA_M1 = I915_READ(PIPEA_DATA_M1); | ||
272 | dev_priv->savePIPEA_DATA_N1 = I915_READ(PIPEA_DATA_N1); | ||
273 | dev_priv->savePIPEA_LINK_M1 = I915_READ(PIPEA_LINK_M1); | ||
274 | dev_priv->savePIPEA_LINK_N1 = I915_READ(PIPEA_LINK_N1); | ||
275 | |||
266 | dev_priv->saveFDI_TXA_CTL = I915_READ(FDI_TXA_CTL); | 276 | dev_priv->saveFDI_TXA_CTL = I915_READ(FDI_TXA_CTL); |
267 | dev_priv->saveFDI_RXA_CTL = I915_READ(FDI_RXA_CTL); | 277 | dev_priv->saveFDI_RXA_CTL = I915_READ(FDI_RXA_CTL); |
268 | 278 | ||
@@ -270,6 +280,7 @@ static void i915_save_modeset_reg(struct drm_device *dev) | |||
270 | dev_priv->savePFA_WIN_SZ = I915_READ(PFA_WIN_SZ); | 280 | dev_priv->savePFA_WIN_SZ = I915_READ(PFA_WIN_SZ); |
271 | dev_priv->savePFA_WIN_POS = I915_READ(PFA_WIN_POS); | 281 | dev_priv->savePFA_WIN_POS = I915_READ(PFA_WIN_POS); |
272 | 282 | ||
283 | dev_priv->saveTRANSACONF = I915_READ(TRANSACONF); | ||
273 | dev_priv->saveTRANS_HTOTAL_A = I915_READ(TRANS_HTOTAL_A); | 284 | dev_priv->saveTRANS_HTOTAL_A = I915_READ(TRANS_HTOTAL_A); |
274 | dev_priv->saveTRANS_HBLANK_A = I915_READ(TRANS_HBLANK_A); | 285 | dev_priv->saveTRANS_HBLANK_A = I915_READ(TRANS_HBLANK_A); |
275 | dev_priv->saveTRANS_HSYNC_A = I915_READ(TRANS_HSYNC_A); | 286 | dev_priv->saveTRANS_HSYNC_A = I915_READ(TRANS_HSYNC_A); |
@@ -314,6 +325,11 @@ static void i915_save_modeset_reg(struct drm_device *dev) | |||
314 | dev_priv->saveBCLRPAT_B = I915_READ(BCLRPAT_B); | 325 | dev_priv->saveBCLRPAT_B = I915_READ(BCLRPAT_B); |
315 | 326 | ||
316 | if (IS_IRONLAKE(dev)) { | 327 | if (IS_IRONLAKE(dev)) { |
328 | dev_priv->savePIPEB_DATA_M1 = I915_READ(PIPEB_DATA_M1); | ||
329 | dev_priv->savePIPEB_DATA_N1 = I915_READ(PIPEB_DATA_N1); | ||
330 | dev_priv->savePIPEB_LINK_M1 = I915_READ(PIPEB_LINK_M1); | ||
331 | dev_priv->savePIPEB_LINK_N1 = I915_READ(PIPEB_LINK_N1); | ||
332 | |||
317 | dev_priv->saveFDI_TXB_CTL = I915_READ(FDI_TXB_CTL); | 333 | dev_priv->saveFDI_TXB_CTL = I915_READ(FDI_TXB_CTL); |
318 | dev_priv->saveFDI_RXB_CTL = I915_READ(FDI_RXB_CTL); | 334 | dev_priv->saveFDI_RXB_CTL = I915_READ(FDI_RXB_CTL); |
319 | 335 | ||
@@ -321,6 +337,7 @@ static void i915_save_modeset_reg(struct drm_device *dev) | |||
321 | dev_priv->savePFB_WIN_SZ = I915_READ(PFB_WIN_SZ); | 337 | dev_priv->savePFB_WIN_SZ = I915_READ(PFB_WIN_SZ); |
322 | dev_priv->savePFB_WIN_POS = I915_READ(PFB_WIN_POS); | 338 | dev_priv->savePFB_WIN_POS = I915_READ(PFB_WIN_POS); |
323 | 339 | ||
340 | dev_priv->saveTRANSBCONF = I915_READ(TRANSBCONF); | ||
324 | dev_priv->saveTRANS_HTOTAL_B = I915_READ(TRANS_HTOTAL_B); | 341 | dev_priv->saveTRANS_HTOTAL_B = I915_READ(TRANS_HTOTAL_B); |
325 | dev_priv->saveTRANS_HBLANK_B = I915_READ(TRANS_HBLANK_B); | 342 | dev_priv->saveTRANS_HBLANK_B = I915_READ(TRANS_HBLANK_B); |
326 | dev_priv->saveTRANS_HSYNC_B = I915_READ(TRANS_HSYNC_B); | 343 | dev_priv->saveTRANS_HSYNC_B = I915_READ(TRANS_HSYNC_B); |
@@ -368,6 +385,11 @@ static void i915_restore_modeset_reg(struct drm_device *dev) | |||
368 | fpb1_reg = FPB1; | 385 | fpb1_reg = FPB1; |
369 | } | 386 | } |
370 | 387 | ||
388 | if (IS_IRONLAKE(dev)) { | ||
389 | I915_WRITE(PCH_DREF_CONTROL, dev_priv->savePCH_DREF_CONTROL); | ||
390 | I915_WRITE(DISP_ARB_CTL, dev_priv->saveDISP_ARB_CTL); | ||
391 | } | ||
392 | |||
371 | /* Pipe & plane A info */ | 393 | /* Pipe & plane A info */ |
372 | /* Prime the clock */ | 394 | /* Prime the clock */ |
373 | if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) { | 395 | if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) { |
@@ -395,6 +417,11 @@ static void i915_restore_modeset_reg(struct drm_device *dev) | |||
395 | I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A); | 417 | I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A); |
396 | 418 | ||
397 | if (IS_IRONLAKE(dev)) { | 419 | if (IS_IRONLAKE(dev)) { |
420 | I915_WRITE(PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1); | ||
421 | I915_WRITE(PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1); | ||
422 | I915_WRITE(PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1); | ||
423 | I915_WRITE(PIPEA_LINK_N1, dev_priv->savePIPEA_LINK_N1); | ||
424 | |||
398 | I915_WRITE(FDI_RXA_CTL, dev_priv->saveFDI_RXA_CTL); | 425 | I915_WRITE(FDI_RXA_CTL, dev_priv->saveFDI_RXA_CTL); |
399 | I915_WRITE(FDI_TXA_CTL, dev_priv->saveFDI_TXA_CTL); | 426 | I915_WRITE(FDI_TXA_CTL, dev_priv->saveFDI_TXA_CTL); |
400 | 427 | ||
@@ -402,6 +429,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev) | |||
402 | I915_WRITE(PFA_WIN_SZ, dev_priv->savePFA_WIN_SZ); | 429 | I915_WRITE(PFA_WIN_SZ, dev_priv->savePFA_WIN_SZ); |
403 | I915_WRITE(PFA_WIN_POS, dev_priv->savePFA_WIN_POS); | 430 | I915_WRITE(PFA_WIN_POS, dev_priv->savePFA_WIN_POS); |
404 | 431 | ||
432 | I915_WRITE(TRANSACONF, dev_priv->saveTRANSACONF); | ||
405 | I915_WRITE(TRANS_HTOTAL_A, dev_priv->saveTRANS_HTOTAL_A); | 433 | I915_WRITE(TRANS_HTOTAL_A, dev_priv->saveTRANS_HTOTAL_A); |
406 | I915_WRITE(TRANS_HBLANK_A, dev_priv->saveTRANS_HBLANK_A); | 434 | I915_WRITE(TRANS_HBLANK_A, dev_priv->saveTRANS_HBLANK_A); |
407 | I915_WRITE(TRANS_HSYNC_A, dev_priv->saveTRANS_HSYNC_A); | 435 | I915_WRITE(TRANS_HSYNC_A, dev_priv->saveTRANS_HSYNC_A); |
@@ -439,7 +467,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev) | |||
439 | /* Actually enable it */ | 467 | /* Actually enable it */ |
440 | I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B); | 468 | I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B); |
441 | DRM_UDELAY(150); | 469 | DRM_UDELAY(150); |
442 | if (IS_I965G(dev)) | 470 | if (IS_I965G(dev) && !IS_IRONLAKE(dev)) |
443 | I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD); | 471 | I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD); |
444 | DRM_UDELAY(150); | 472 | DRM_UDELAY(150); |
445 | 473 | ||
@@ -454,6 +482,11 @@ static void i915_restore_modeset_reg(struct drm_device *dev) | |||
454 | I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B); | 482 | I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B); |
455 | 483 | ||
456 | if (IS_IRONLAKE(dev)) { | 484 | if (IS_IRONLAKE(dev)) { |
485 | I915_WRITE(PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1); | ||
486 | I915_WRITE(PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1); | ||
487 | I915_WRITE(PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1); | ||
488 | I915_WRITE(PIPEB_LINK_N1, dev_priv->savePIPEB_LINK_N1); | ||
489 | |||
457 | I915_WRITE(FDI_RXB_CTL, dev_priv->saveFDI_RXB_CTL); | 490 | I915_WRITE(FDI_RXB_CTL, dev_priv->saveFDI_RXB_CTL); |
458 | I915_WRITE(FDI_TXB_CTL, dev_priv->saveFDI_TXB_CTL); | 491 | I915_WRITE(FDI_TXB_CTL, dev_priv->saveFDI_TXB_CTL); |
459 | 492 | ||
@@ -461,6 +494,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev) | |||
461 | I915_WRITE(PFB_WIN_SZ, dev_priv->savePFB_WIN_SZ); | 494 | I915_WRITE(PFB_WIN_SZ, dev_priv->savePFB_WIN_SZ); |
462 | I915_WRITE(PFB_WIN_POS, dev_priv->savePFB_WIN_POS); | 495 | I915_WRITE(PFB_WIN_POS, dev_priv->savePFB_WIN_POS); |
463 | 496 | ||
497 | I915_WRITE(TRANSBCONF, dev_priv->saveTRANSBCONF); | ||
464 | I915_WRITE(TRANS_HTOTAL_B, dev_priv->saveTRANS_HTOTAL_B); | 498 | I915_WRITE(TRANS_HTOTAL_B, dev_priv->saveTRANS_HTOTAL_B); |
465 | I915_WRITE(TRANS_HBLANK_B, dev_priv->saveTRANS_HBLANK_B); | 499 | I915_WRITE(TRANS_HBLANK_B, dev_priv->saveTRANS_HBLANK_B); |
466 | I915_WRITE(TRANS_HSYNC_B, dev_priv->saveTRANS_HSYNC_B); | 500 | I915_WRITE(TRANS_HSYNC_B, dev_priv->saveTRANS_HSYNC_B); |
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index ec5df0f88417..9f3d3e563414 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c | |||
@@ -234,8 +234,8 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) | |||
234 | } while (time_after(timeout, jiffies)); | 234 | } while (time_after(timeout, jiffies)); |
235 | } | 235 | } |
236 | 236 | ||
237 | if ((I915_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) == | 237 | if ((I915_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) != |
238 | CRT_HOTPLUG_MONITOR_COLOR) | 238 | CRT_HOTPLUG_MONITOR_NONE) |
239 | return true; | 239 | return true; |
240 | 240 | ||
241 | return false; | 241 | return false; |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 279dc96e3eb2..52cd9b006da2 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -32,7 +32,7 @@ | |||
32 | #include "intel_drv.h" | 32 | #include "intel_drv.h" |
33 | #include "i915_drm.h" | 33 | #include "i915_drm.h" |
34 | #include "i915_drv.h" | 34 | #include "i915_drv.h" |
35 | #include "intel_dp.h" | 35 | #include "drm_dp_helper.h" |
36 | 36 | ||
37 | #include "drm_crtc_helper.h" | 37 | #include "drm_crtc_helper.h" |
38 | 38 | ||
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 632f1b44c28a..4e7aa8b7b938 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -33,7 +33,7 @@ | |||
33 | #include "intel_drv.h" | 33 | #include "intel_drv.h" |
34 | #include "i915_drm.h" | 34 | #include "i915_drm.h" |
35 | #include "i915_drv.h" | 35 | #include "i915_drv.h" |
36 | #include "intel_dp.h" | 36 | #include "drm_dp_helper.h" |
37 | 37 | ||
38 | 38 | ||
39 | #define DP_LINK_STATUS_SIZE 6 | 39 | #define DP_LINK_STATUS_SIZE 6 |
@@ -383,17 +383,77 @@ intel_dp_aux_native_read(struct intel_output *intel_output, | |||
383 | } | 383 | } |
384 | 384 | ||
385 | static int | 385 | static int |
386 | intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, | 386 | intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, |
387 | uint8_t *send, int send_bytes, | 387 | uint8_t write_byte, uint8_t *read_byte) |
388 | uint8_t *recv, int recv_bytes) | ||
389 | { | 388 | { |
389 | struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; | ||
390 | struct intel_dp_priv *dp_priv = container_of(adapter, | 390 | struct intel_dp_priv *dp_priv = container_of(adapter, |
391 | struct intel_dp_priv, | 391 | struct intel_dp_priv, |
392 | adapter); | 392 | adapter); |
393 | struct intel_output *intel_output = dp_priv->intel_output; | 393 | struct intel_output *intel_output = dp_priv->intel_output; |
394 | uint16_t address = algo_data->address; | ||
395 | uint8_t msg[5]; | ||
396 | uint8_t reply[2]; | ||
397 | int msg_bytes; | ||
398 | int reply_bytes; | ||
399 | int ret; | ||
400 | |||
401 | /* Set up the command byte */ | ||
402 | if (mode & MODE_I2C_READ) | ||
403 | msg[0] = AUX_I2C_READ << 4; | ||
404 | else | ||
405 | msg[0] = AUX_I2C_WRITE << 4; | ||
406 | |||
407 | if (!(mode & MODE_I2C_STOP)) | ||
408 | msg[0] |= AUX_I2C_MOT << 4; | ||
409 | |||
410 | msg[1] = address >> 8; | ||
411 | msg[2] = address; | ||
412 | |||
413 | switch (mode) { | ||
414 | case MODE_I2C_WRITE: | ||
415 | msg[3] = 0; | ||
416 | msg[4] = write_byte; | ||
417 | msg_bytes = 5; | ||
418 | reply_bytes = 1; | ||
419 | break; | ||
420 | case MODE_I2C_READ: | ||
421 | msg[3] = 0; | ||
422 | msg_bytes = 4; | ||
423 | reply_bytes = 2; | ||
424 | break; | ||
425 | default: | ||
426 | msg_bytes = 3; | ||
427 | reply_bytes = 1; | ||
428 | break; | ||
429 | } | ||
394 | 430 | ||
395 | return intel_dp_aux_ch(intel_output, | 431 | for (;;) { |
396 | send, send_bytes, recv, recv_bytes); | 432 | ret = intel_dp_aux_ch(intel_output, |
433 | msg, msg_bytes, | ||
434 | reply, reply_bytes); | ||
435 | if (ret < 0) { | ||
436 | DRM_DEBUG_KMS("aux_ch failed %d\n", ret); | ||
437 | return ret; | ||
438 | } | ||
439 | switch (reply[0] & AUX_I2C_REPLY_MASK) { | ||
440 | case AUX_I2C_REPLY_ACK: | ||
441 | if (mode == MODE_I2C_READ) { | ||
442 | *read_byte = reply[1]; | ||
443 | } | ||
444 | return reply_bytes - 1; | ||
445 | case AUX_I2C_REPLY_NACK: | ||
446 | DRM_DEBUG_KMS("aux_ch nack\n"); | ||
447 | return -EREMOTEIO; | ||
448 | case AUX_I2C_REPLY_DEFER: | ||
449 | DRM_DEBUG_KMS("aux_ch defer\n"); | ||
450 | udelay(100); | ||
451 | break; | ||
452 | default: | ||
453 | DRM_ERROR("aux_ch invalid reply 0x%02x\n", reply[0]); | ||
454 | return -EREMOTEIO; | ||
455 | } | ||
456 | } | ||
397 | } | 457 | } |
398 | 458 | ||
399 | static int | 459 | static int |
diff --git a/drivers/gpu/drm/i915/intel_dp.h b/drivers/gpu/drm/i915/intel_dp.h deleted file mode 100644 index 2b38054d3b6d..000000000000 --- a/drivers/gpu/drm/i915/intel_dp.h +++ /dev/null | |||
@@ -1,144 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright © 2008 Keith Packard | ||
3 | * | ||
4 | * Permission to use, copy, modify, distribute, and sell this software and its | ||
5 | * documentation for any purpose is hereby granted without fee, provided that | ||
6 | * the above copyright notice appear in all copies and that both that copyright | ||
7 | * notice and this permission notice appear in supporting documentation, and | ||
8 | * that the name of the copyright holders not be used in advertising or | ||
9 | * publicity pertaining to distribution of the software without specific, | ||
10 | * written prior permission. The copyright holders make no representations | ||
11 | * about the suitability of this software for any purpose. It is provided "as | ||
12 | * is" without express or implied warranty. | ||
13 | * | ||
14 | * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, | ||
15 | * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO | ||
16 | * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR | ||
17 | * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, | ||
18 | * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER | ||
19 | * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE | ||
20 | * OF THIS SOFTWARE. | ||
21 | */ | ||
22 | |||
23 | #ifndef _INTEL_DP_H_ | ||
24 | #define _INTEL_DP_H_ | ||
25 | |||
26 | /* From the VESA DisplayPort spec */ | ||
27 | |||
28 | #define AUX_NATIVE_WRITE 0x8 | ||
29 | #define AUX_NATIVE_READ 0x9 | ||
30 | #define AUX_I2C_WRITE 0x0 | ||
31 | #define AUX_I2C_READ 0x1 | ||
32 | #define AUX_I2C_STATUS 0x2 | ||
33 | #define AUX_I2C_MOT 0x4 | ||
34 | |||
35 | #define AUX_NATIVE_REPLY_ACK (0x0 << 4) | ||
36 | #define AUX_NATIVE_REPLY_NACK (0x1 << 4) | ||
37 | #define AUX_NATIVE_REPLY_DEFER (0x2 << 4) | ||
38 | #define AUX_NATIVE_REPLY_MASK (0x3 << 4) | ||
39 | |||
40 | #define AUX_I2C_REPLY_ACK (0x0 << 6) | ||
41 | #define AUX_I2C_REPLY_NACK (0x1 << 6) | ||
42 | #define AUX_I2C_REPLY_DEFER (0x2 << 6) | ||
43 | #define AUX_I2C_REPLY_MASK (0x3 << 6) | ||
44 | |||
45 | /* AUX CH addresses */ | ||
46 | #define DP_LINK_BW_SET 0x100 | ||
47 | # define DP_LINK_BW_1_62 0x06 | ||
48 | # define DP_LINK_BW_2_7 0x0a | ||
49 | |||
50 | #define DP_LANE_COUNT_SET 0x101 | ||
51 | # define DP_LANE_COUNT_MASK 0x0f | ||
52 | # define DP_LANE_COUNT_ENHANCED_FRAME_EN (1 << 7) | ||
53 | |||
54 | #define DP_TRAINING_PATTERN_SET 0x102 | ||
55 | |||
56 | # define DP_TRAINING_PATTERN_DISABLE 0 | ||
57 | # define DP_TRAINING_PATTERN_1 1 | ||
58 | # define DP_TRAINING_PATTERN_2 2 | ||
59 | # define DP_TRAINING_PATTERN_MASK 0x3 | ||
60 | |||
61 | # define DP_LINK_QUAL_PATTERN_DISABLE (0 << 2) | ||
62 | # define DP_LINK_QUAL_PATTERN_D10_2 (1 << 2) | ||
63 | # define DP_LINK_QUAL_PATTERN_ERROR_RATE (2 << 2) | ||
64 | # define DP_LINK_QUAL_PATTERN_PRBS7 (3 << 2) | ||
65 | # define DP_LINK_QUAL_PATTERN_MASK (3 << 2) | ||
66 | |||
67 | # define DP_RECOVERED_CLOCK_OUT_EN (1 << 4) | ||
68 | # define DP_LINK_SCRAMBLING_DISABLE (1 << 5) | ||
69 | |||
70 | # define DP_SYMBOL_ERROR_COUNT_BOTH (0 << 6) | ||
71 | # define DP_SYMBOL_ERROR_COUNT_DISPARITY (1 << 6) | ||
72 | # define DP_SYMBOL_ERROR_COUNT_SYMBOL (2 << 6) | ||
73 | # define DP_SYMBOL_ERROR_COUNT_MASK (3 << 6) | ||
74 | |||
75 | #define DP_TRAINING_LANE0_SET 0x103 | ||
76 | #define DP_TRAINING_LANE1_SET 0x104 | ||
77 | #define DP_TRAINING_LANE2_SET 0x105 | ||
78 | #define DP_TRAINING_LANE3_SET 0x106 | ||
79 | |||
80 | # define DP_TRAIN_VOLTAGE_SWING_MASK 0x3 | ||
81 | # define DP_TRAIN_VOLTAGE_SWING_SHIFT 0 | ||
82 | # define DP_TRAIN_MAX_SWING_REACHED (1 << 2) | ||
83 | # define DP_TRAIN_VOLTAGE_SWING_400 (0 << 0) | ||
84 | # define DP_TRAIN_VOLTAGE_SWING_600 (1 << 0) | ||
85 | # define DP_TRAIN_VOLTAGE_SWING_800 (2 << 0) | ||
86 | # define DP_TRAIN_VOLTAGE_SWING_1200 (3 << 0) | ||
87 | |||
88 | # define DP_TRAIN_PRE_EMPHASIS_MASK (3 << 3) | ||
89 | # define DP_TRAIN_PRE_EMPHASIS_0 (0 << 3) | ||
90 | # define DP_TRAIN_PRE_EMPHASIS_3_5 (1 << 3) | ||
91 | # define DP_TRAIN_PRE_EMPHASIS_6 (2 << 3) | ||
92 | # define DP_TRAIN_PRE_EMPHASIS_9_5 (3 << 3) | ||
93 | |||
94 | # define DP_TRAIN_PRE_EMPHASIS_SHIFT 3 | ||
95 | # define DP_TRAIN_MAX_PRE_EMPHASIS_REACHED (1 << 5) | ||
96 | |||
97 | #define DP_DOWNSPREAD_CTRL 0x107 | ||
98 | # define DP_SPREAD_AMP_0_5 (1 << 4) | ||
99 | |||
100 | #define DP_MAIN_LINK_CHANNEL_CODING_SET 0x108 | ||
101 | # define DP_SET_ANSI_8B10B (1 << 0) | ||
102 | |||
103 | #define DP_LANE0_1_STATUS 0x202 | ||
104 | #define DP_LANE2_3_STATUS 0x203 | ||
105 | |||
106 | # define DP_LANE_CR_DONE (1 << 0) | ||
107 | # define DP_LANE_CHANNEL_EQ_DONE (1 << 1) | ||
108 | # define DP_LANE_SYMBOL_LOCKED (1 << 2) | ||
109 | |||
110 | #define DP_LANE_ALIGN_STATUS_UPDATED 0x204 | ||
111 | |||
112 | #define DP_INTERLANE_ALIGN_DONE (1 << 0) | ||
113 | #define DP_DOWNSTREAM_PORT_STATUS_CHANGED (1 << 6) | ||
114 | #define DP_LINK_STATUS_UPDATED (1 << 7) | ||
115 | |||
116 | #define DP_SINK_STATUS 0x205 | ||
117 | |||
118 | #define DP_RECEIVE_PORT_0_STATUS (1 << 0) | ||
119 | #define DP_RECEIVE_PORT_1_STATUS (1 << 1) | ||
120 | |||
121 | #define DP_ADJUST_REQUEST_LANE0_1 0x206 | ||
122 | #define DP_ADJUST_REQUEST_LANE2_3 0x207 | ||
123 | |||
124 | #define DP_ADJUST_VOLTAGE_SWING_LANE0_MASK 0x03 | ||
125 | #define DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT 0 | ||
126 | #define DP_ADJUST_PRE_EMPHASIS_LANE0_MASK 0x0c | ||
127 | #define DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT 2 | ||
128 | #define DP_ADJUST_VOLTAGE_SWING_LANE1_MASK 0x30 | ||
129 | #define DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT 4 | ||
130 | #define DP_ADJUST_PRE_EMPHASIS_LANE1_MASK 0xc0 | ||
131 | #define DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT 6 | ||
132 | |||
133 | struct i2c_algo_dp_aux_data { | ||
134 | bool running; | ||
135 | u16 address; | ||
136 | int (*aux_ch) (struct i2c_adapter *adapter, | ||
137 | uint8_t *send, int send_bytes, | ||
138 | uint8_t *recv, int recv_bytes); | ||
139 | }; | ||
140 | |||
141 | int | ||
142 | i2c_dp_aux_add_bus(struct i2c_adapter *adapter); | ||
143 | |||
144 | #endif /* _INTEL_DP_H_ */ | ||
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile index b5713eedd6e1..feb52eee4314 100644 --- a/drivers/gpu/drm/radeon/Makefile +++ b/drivers/gpu/drm/radeon/Makefile | |||
@@ -49,7 +49,7 @@ radeon-y += radeon_device.o radeon_kms.o \ | |||
49 | radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \ | 49 | radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \ |
50 | rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \ | 50 | rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \ |
51 | r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \ | 51 | r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \ |
52 | r600_blit_kms.o radeon_pm.o | 52 | r600_blit_kms.o radeon_pm.o atombios_dp.o |
53 | 53 | ||
54 | radeon-$(CONFIG_COMPAT) += radeon_ioc32.o | 54 | radeon-$(CONFIG_COMPAT) += radeon_ioc32.o |
55 | 55 | ||
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c index 901befe03da2..6578d19dff93 100644 --- a/drivers/gpu/drm/radeon/atom.c +++ b/drivers/gpu/drm/radeon/atom.c | |||
@@ -107,6 +107,7 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base, | |||
107 | base += 3; | 107 | base += 3; |
108 | break; | 108 | break; |
109 | case ATOM_IIO_WRITE: | 109 | case ATOM_IIO_WRITE: |
110 | (void)ctx->card->reg_read(ctx->card, CU16(base + 1)); | ||
110 | ctx->card->reg_write(ctx->card, CU16(base + 1), temp); | 111 | ctx->card->reg_write(ctx->card, CU16(base + 1), temp); |
111 | base += 3; | 112 | base += 3; |
112 | break; | 113 | break; |
@@ -262,10 +263,10 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr, | |||
262 | case ATOM_ARG_FB: | 263 | case ATOM_ARG_FB: |
263 | idx = U8(*ptr); | 264 | idx = U8(*ptr); |
264 | (*ptr)++; | 265 | (*ptr)++; |
266 | val = gctx->scratch[((gctx->fb_base + idx) / 4)]; | ||
265 | if (print) | 267 | if (print) |
266 | DEBUG("FB[0x%02X]", idx); | 268 | DEBUG("FB[0x%02X]", idx); |
267 | printk(KERN_INFO "FB access is not implemented.\n"); | 269 | break; |
268 | return 0; | ||
269 | case ATOM_ARG_IMM: | 270 | case ATOM_ARG_IMM: |
270 | switch (align) { | 271 | switch (align) { |
271 | case ATOM_SRC_DWORD: | 272 | case ATOM_SRC_DWORD: |
@@ -487,9 +488,9 @@ static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr, | |||
487 | case ATOM_ARG_FB: | 488 | case ATOM_ARG_FB: |
488 | idx = U8(*ptr); | 489 | idx = U8(*ptr); |
489 | (*ptr)++; | 490 | (*ptr)++; |
491 | gctx->scratch[((gctx->fb_base + idx) / 4)] = val; | ||
490 | DEBUG("FB[0x%02X]", idx); | 492 | DEBUG("FB[0x%02X]", idx); |
491 | printk(KERN_INFO "FB access is not implemented.\n"); | 493 | break; |
492 | return; | ||
493 | case ATOM_ARG_PLL: | 494 | case ATOM_ARG_PLL: |
494 | idx = U8(*ptr); | 495 | idx = U8(*ptr); |
495 | (*ptr)++; | 496 | (*ptr)++; |
@@ -1213,3 +1214,28 @@ void atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev, | |||
1213 | *crev = CU8(idx + 3); | 1214 | *crev = CU8(idx + 3); |
1214 | return; | 1215 | return; |
1215 | } | 1216 | } |
1217 | |||
1218 | int atom_allocate_fb_scratch(struct atom_context *ctx) | ||
1219 | { | ||
1220 | int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware); | ||
1221 | uint16_t data_offset; | ||
1222 | int usage_bytes; | ||
1223 | struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage; | ||
1224 | |||
1225 | atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset); | ||
1226 | |||
1227 | firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset); | ||
1228 | |||
1229 | DRM_DEBUG("atom firmware requested %08x %dkb\n", | ||
1230 | firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware, | ||
1231 | firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb); | ||
1232 | |||
1233 | usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024; | ||
1234 | if (usage_bytes == 0) | ||
1235 | usage_bytes = 20 * 1024; | ||
1236 | /* allocate some scratch memory */ | ||
1237 | ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL); | ||
1238 | if (!ctx->scratch) | ||
1239 | return -ENOMEM; | ||
1240 | return 0; | ||
1241 | } | ||
diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h index e6eb38f2bcae..6671848e5ea1 100644 --- a/drivers/gpu/drm/radeon/atom.h +++ b/drivers/gpu/drm/radeon/atom.h | |||
@@ -132,6 +132,7 @@ struct atom_context { | |||
132 | uint8_t shift; | 132 | uint8_t shift; |
133 | int cs_equal, cs_above; | 133 | int cs_equal, cs_above; |
134 | int io_mode; | 134 | int io_mode; |
135 | uint32_t *scratch; | ||
135 | }; | 136 | }; |
136 | 137 | ||
137 | extern int atom_debug; | 138 | extern int atom_debug; |
@@ -142,6 +143,7 @@ int atom_asic_init(struct atom_context *); | |||
142 | void atom_destroy(struct atom_context *); | 143 | void atom_destroy(struct atom_context *); |
143 | void atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size, uint8_t *frev, uint8_t *crev, uint16_t *data_start); | 144 | void atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size, uint8_t *frev, uint8_t *crev, uint16_t *data_start); |
144 | void atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t *frev, uint8_t *crev); | 145 | void atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t *frev, uint8_t *crev); |
146 | int atom_allocate_fb_scratch(struct atom_context *ctx); | ||
145 | #include "atom-types.h" | 147 | #include "atom-types.h" |
146 | #include "atombios.h" | 148 | #include "atombios.h" |
147 | #include "ObjectID.h" | 149 | #include "ObjectID.h" |
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h index c11ddddfb3b6..e83927644de4 100644 --- a/drivers/gpu/drm/radeon/atombios.h +++ b/drivers/gpu/drm/radeon/atombios.h | |||
@@ -2680,7 +2680,7 @@ typedef struct _ATOM_I2C_RECORD { | |||
2680 | typedef struct _ATOM_HPD_INT_RECORD { | 2680 | typedef struct _ATOM_HPD_INT_RECORD { |
2681 | ATOM_COMMON_RECORD_HEADER sheader; | 2681 | ATOM_COMMON_RECORD_HEADER sheader; |
2682 | UCHAR ucHPDIntGPIOID; /* Corresponding block in GPIO_PIN_INFO table gives the pin info */ | 2682 | UCHAR ucHPDIntGPIOID; /* Corresponding block in GPIO_PIN_INFO table gives the pin info */ |
2683 | UCHAR ucPluggged_PinState; | 2683 | UCHAR ucPlugged_PinState; |
2684 | } ATOM_HPD_INT_RECORD; | 2684 | } ATOM_HPD_INT_RECORD; |
2685 | 2685 | ||
2686 | typedef struct _ATOM_OUTPUT_PROTECTION_RECORD { | 2686 | typedef struct _ATOM_OUTPUT_PROTECTION_RECORD { |
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index c15287a590ff..fba3c96b915b 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c | |||
@@ -241,6 +241,7 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
241 | { | 241 | { |
242 | struct drm_device *dev = crtc->dev; | 242 | struct drm_device *dev = crtc->dev; |
243 | struct radeon_device *rdev = dev->dev_private; | 243 | struct radeon_device *rdev = dev->dev_private; |
244 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | ||
244 | 245 | ||
245 | switch (mode) { | 246 | switch (mode) { |
246 | case DRM_MODE_DPMS_ON: | 247 | case DRM_MODE_DPMS_ON: |
@@ -248,20 +249,19 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
248 | if (ASIC_IS_DCE3(rdev)) | 249 | if (ASIC_IS_DCE3(rdev)) |
249 | atombios_enable_crtc_memreq(crtc, 1); | 250 | atombios_enable_crtc_memreq(crtc, 1); |
250 | atombios_blank_crtc(crtc, 0); | 251 | atombios_blank_crtc(crtc, 0); |
252 | drm_vblank_post_modeset(dev, radeon_crtc->crtc_id); | ||
253 | radeon_crtc_load_lut(crtc); | ||
251 | break; | 254 | break; |
252 | case DRM_MODE_DPMS_STANDBY: | 255 | case DRM_MODE_DPMS_STANDBY: |
253 | case DRM_MODE_DPMS_SUSPEND: | 256 | case DRM_MODE_DPMS_SUSPEND: |
254 | case DRM_MODE_DPMS_OFF: | 257 | case DRM_MODE_DPMS_OFF: |
258 | drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id); | ||
255 | atombios_blank_crtc(crtc, 1); | 259 | atombios_blank_crtc(crtc, 1); |
256 | if (ASIC_IS_DCE3(rdev)) | 260 | if (ASIC_IS_DCE3(rdev)) |
257 | atombios_enable_crtc_memreq(crtc, 0); | 261 | atombios_enable_crtc_memreq(crtc, 0); |
258 | atombios_enable_crtc(crtc, 0); | 262 | atombios_enable_crtc(crtc, 0); |
259 | break; | 263 | break; |
260 | } | 264 | } |
261 | |||
262 | if (mode != DRM_MODE_DPMS_OFF) { | ||
263 | radeon_crtc_load_lut(crtc); | ||
264 | } | ||
265 | } | 265 | } |
266 | 266 | ||
267 | static void | 267 | static void |
@@ -457,9 +457,8 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) | |||
457 | if (encoder->encoder_type != | 457 | if (encoder->encoder_type != |
458 | DRM_MODE_ENCODER_DAC) | 458 | DRM_MODE_ENCODER_DAC) |
459 | pll_flags |= RADEON_PLL_NO_ODD_POST_DIV; | 459 | pll_flags |= RADEON_PLL_NO_ODD_POST_DIV; |
460 | if (!ASIC_IS_AVIVO(rdev) | 460 | if (encoder->encoder_type == |
461 | && (encoder->encoder_type == | 461 | DRM_MODE_ENCODER_LVDS) |
462 | DRM_MODE_ENCODER_LVDS)) | ||
463 | pll_flags |= RADEON_PLL_USE_REF_DIV; | 462 | pll_flags |= RADEON_PLL_USE_REF_DIV; |
464 | } | 463 | } |
465 | radeon_encoder = to_radeon_encoder(encoder); | 464 | radeon_encoder = to_radeon_encoder(encoder); |
@@ -574,21 +573,34 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y, | |||
574 | struct radeon_device *rdev = dev->dev_private; | 573 | struct radeon_device *rdev = dev->dev_private; |
575 | struct radeon_framebuffer *radeon_fb; | 574 | struct radeon_framebuffer *radeon_fb; |
576 | struct drm_gem_object *obj; | 575 | struct drm_gem_object *obj; |
577 | struct drm_radeon_gem_object *obj_priv; | 576 | struct radeon_bo *rbo; |
578 | uint64_t fb_location; | 577 | uint64_t fb_location; |
579 | uint32_t fb_format, fb_pitch_pixels, tiling_flags; | 578 | uint32_t fb_format, fb_pitch_pixels, tiling_flags; |
579 | int r; | ||
580 | 580 | ||
581 | if (!crtc->fb) | 581 | /* no fb bound */ |
582 | return -EINVAL; | 582 | if (!crtc->fb) { |
583 | DRM_DEBUG("No FB bound\n"); | ||
584 | return 0; | ||
585 | } | ||
583 | 586 | ||
584 | radeon_fb = to_radeon_framebuffer(crtc->fb); | 587 | radeon_fb = to_radeon_framebuffer(crtc->fb); |
585 | 588 | ||
589 | /* Pin framebuffer & get tilling informations */ | ||
586 | obj = radeon_fb->obj; | 590 | obj = radeon_fb->obj; |
587 | obj_priv = obj->driver_private; | 591 | rbo = obj->driver_private; |
588 | 592 | r = radeon_bo_reserve(rbo, false); | |
589 | if (radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &fb_location)) { | 593 | if (unlikely(r != 0)) |
594 | return r; | ||
595 | r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location); | ||
596 | if (unlikely(r != 0)) { | ||
597 | radeon_bo_unreserve(rbo); | ||
590 | return -EINVAL; | 598 | return -EINVAL; |
591 | } | 599 | } |
600 | radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL); | ||
601 | radeon_bo_unreserve(rbo); | ||
602 | if (tiling_flags & RADEON_TILING_MACRO) | ||
603 | fb_format |= AVIVO_D1GRPH_MACRO_ADDRESS_MODE; | ||
592 | 604 | ||
593 | switch (crtc->fb->bits_per_pixel) { | 605 | switch (crtc->fb->bits_per_pixel) { |
594 | case 8: | 606 | case 8: |
@@ -618,11 +630,6 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y, | |||
618 | return -EINVAL; | 630 | return -EINVAL; |
619 | } | 631 | } |
620 | 632 | ||
621 | radeon_object_get_tiling_flags(obj->driver_private, | ||
622 | &tiling_flags, NULL); | ||
623 | if (tiling_flags & RADEON_TILING_MACRO) | ||
624 | fb_format |= AVIVO_D1GRPH_MACRO_ADDRESS_MODE; | ||
625 | |||
626 | if (tiling_flags & RADEON_TILING_MICRO) | 633 | if (tiling_flags & RADEON_TILING_MICRO) |
627 | fb_format |= AVIVO_D1GRPH_TILED; | 634 | fb_format |= AVIVO_D1GRPH_TILED; |
628 | 635 | ||
@@ -674,7 +681,12 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y, | |||
674 | 681 | ||
675 | if (old_fb && old_fb != crtc->fb) { | 682 | if (old_fb && old_fb != crtc->fb) { |
676 | radeon_fb = to_radeon_framebuffer(old_fb); | 683 | radeon_fb = to_radeon_framebuffer(old_fb); |
677 | radeon_gem_object_unpin(radeon_fb->obj); | 684 | rbo = radeon_fb->obj->driver_private; |
685 | r = radeon_bo_reserve(rbo, false); | ||
686 | if (unlikely(r != 0)) | ||
687 | return r; | ||
688 | radeon_bo_unpin(rbo); | ||
689 | radeon_bo_unreserve(rbo); | ||
678 | } | 690 | } |
679 | 691 | ||
680 | /* Bytes per pixel may have changed */ | 692 | /* Bytes per pixel may have changed */ |
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c new file mode 100644 index 000000000000..0d63c4436e7c --- /dev/null +++ b/drivers/gpu/drm/radeon/atombios_dp.c | |||
@@ -0,0 +1,790 @@ | |||
1 | /* | ||
2 | * Copyright 2007-8 Advanced Micro Devices, Inc. | ||
3 | * Copyright 2008 Red Hat Inc. | ||
4 | * | ||
5 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
6 | * copy of this software and associated documentation files (the "Software"), | ||
7 | * to deal in the Software without restriction, including without limitation | ||
8 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
9 | * and/or sell copies of the Software, and to permit persons to whom the | ||
10 | * Software is furnished to do so, subject to the following conditions: | ||
11 | * | ||
12 | * The above copyright notice and this permission notice shall be included in | ||
13 | * all copies or substantial portions of the Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
19 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
20 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
21 | * OTHER DEALINGS IN THE SOFTWARE. | ||
22 | * | ||
23 | * Authors: Dave Airlie | ||
24 | * Alex Deucher | ||
25 | */ | ||
26 | #include "drmP.h" | ||
27 | #include "radeon_drm.h" | ||
28 | #include "radeon.h" | ||
29 | |||
30 | #include "atom.h" | ||
31 | #include "atom-bits.h" | ||
32 | #include "drm_dp_helper.h" | ||
33 | |||
34 | /* move these to drm_dp_helper.c/h */ | ||
35 | #define DP_LINK_CONFIGURATION_SIZE 9 | ||
36 | #define DP_LINK_STATUS_SIZE 6 | ||
37 | #define DP_DPCD_SIZE 8 | ||
38 | |||
39 | static char *voltage_names[] = { | ||
40 | "0.4V", "0.6V", "0.8V", "1.2V" | ||
41 | }; | ||
42 | static char *pre_emph_names[] = { | ||
43 | "0dB", "3.5dB", "6dB", "9.5dB" | ||
44 | }; | ||
45 | |||
46 | static const int dp_clocks[] = { | ||
47 | 54000, /* 1 lane, 1.62 Ghz */ | ||
48 | 90000, /* 1 lane, 2.70 Ghz */ | ||
49 | 108000, /* 2 lane, 1.62 Ghz */ | ||
50 | 180000, /* 2 lane, 2.70 Ghz */ | ||
51 | 216000, /* 4 lane, 1.62 Ghz */ | ||
52 | 360000, /* 4 lane, 2.70 Ghz */ | ||
53 | }; | ||
54 | |||
55 | static const int num_dp_clocks = sizeof(dp_clocks) / sizeof(int); | ||
56 | |||
57 | /* common helper functions */ | ||
58 | static int dp_lanes_for_mode_clock(u8 dpcd[DP_DPCD_SIZE], int mode_clock) | ||
59 | { | ||
60 | int i; | ||
61 | u8 max_link_bw; | ||
62 | u8 max_lane_count; | ||
63 | |||
64 | if (!dpcd) | ||
65 | return 0; | ||
66 | |||
67 | max_link_bw = dpcd[DP_MAX_LINK_RATE]; | ||
68 | max_lane_count = dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK; | ||
69 | |||
70 | switch (max_link_bw) { | ||
71 | case DP_LINK_BW_1_62: | ||
72 | default: | ||
73 | for (i = 0; i < num_dp_clocks; i++) { | ||
74 | if (i % 2) | ||
75 | continue; | ||
76 | switch (max_lane_count) { | ||
77 | case 1: | ||
78 | if (i > 1) | ||
79 | return 0; | ||
80 | break; | ||
81 | case 2: | ||
82 | if (i > 3) | ||
83 | return 0; | ||
84 | break; | ||
85 | case 4: | ||
86 | default: | ||
87 | break; | ||
88 | } | ||
89 | if (dp_clocks[i] > mode_clock) { | ||
90 | if (i < 2) | ||
91 | return 1; | ||
92 | else if (i < 4) | ||
93 | return 2; | ||
94 | else | ||
95 | return 4; | ||
96 | } | ||
97 | } | ||
98 | break; | ||
99 | case DP_LINK_BW_2_7: | ||
100 | for (i = 0; i < num_dp_clocks; i++) { | ||
101 | switch (max_lane_count) { | ||
102 | case 1: | ||
103 | if (i > 1) | ||
104 | return 0; | ||
105 | break; | ||
106 | case 2: | ||
107 | if (i > 3) | ||
108 | return 0; | ||
109 | break; | ||
110 | case 4: | ||
111 | default: | ||
112 | break; | ||
113 | } | ||
114 | if (dp_clocks[i] > mode_clock) { | ||
115 | if (i < 2) | ||
116 | return 1; | ||
117 | else if (i < 4) | ||
118 | return 2; | ||
119 | else | ||
120 | return 4; | ||
121 | } | ||
122 | } | ||
123 | break; | ||
124 | } | ||
125 | |||
126 | return 0; | ||
127 | } | ||
128 | |||
129 | static int dp_link_clock_for_mode_clock(u8 dpcd[DP_DPCD_SIZE], int mode_clock) | ||
130 | { | ||
131 | int i; | ||
132 | u8 max_link_bw; | ||
133 | u8 max_lane_count; | ||
134 | |||
135 | if (!dpcd) | ||
136 | return 0; | ||
137 | |||
138 | max_link_bw = dpcd[DP_MAX_LINK_RATE]; | ||
139 | max_lane_count = dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK; | ||
140 | |||
141 | switch (max_link_bw) { | ||
142 | case DP_LINK_BW_1_62: | ||
143 | default: | ||
144 | for (i = 0; i < num_dp_clocks; i++) { | ||
145 | if (i % 2) | ||
146 | continue; | ||
147 | switch (max_lane_count) { | ||
148 | case 1: | ||
149 | if (i > 1) | ||
150 | return 0; | ||
151 | break; | ||
152 | case 2: | ||
153 | if (i > 3) | ||
154 | return 0; | ||
155 | break; | ||
156 | case 4: | ||
157 | default: | ||
158 | break; | ||
159 | } | ||
160 | if (dp_clocks[i] > mode_clock) | ||
161 | return 162000; | ||
162 | } | ||
163 | break; | ||
164 | case DP_LINK_BW_2_7: | ||
165 | for (i = 0; i < num_dp_clocks; i++) { | ||
166 | switch (max_lane_count) { | ||
167 | case 1: | ||
168 | if (i > 1) | ||
169 | return 0; | ||
170 | break; | ||
171 | case 2: | ||
172 | if (i > 3) | ||
173 | return 0; | ||
174 | break; | ||
175 | case 4: | ||
176 | default: | ||
177 | break; | ||
178 | } | ||
179 | if (dp_clocks[i] > mode_clock) | ||
180 | return (i % 2) ? 270000 : 162000; | ||
181 | } | ||
182 | } | ||
183 | |||
184 | return 0; | ||
185 | } | ||
186 | |||
187 | int dp_mode_valid(u8 dpcd[DP_DPCD_SIZE], int mode_clock) | ||
188 | { | ||
189 | int lanes = dp_lanes_for_mode_clock(dpcd, mode_clock); | ||
190 | int bw = dp_lanes_for_mode_clock(dpcd, mode_clock); | ||
191 | |||
192 | if ((lanes == 0) || (bw == 0)) | ||
193 | return MODE_CLOCK_HIGH; | ||
194 | |||
195 | return MODE_OK; | ||
196 | } | ||
197 | |||
198 | static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r) | ||
199 | { | ||
200 | return link_status[r - DP_LANE0_1_STATUS]; | ||
201 | } | ||
202 | |||
203 | static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE], | ||
204 | int lane) | ||
205 | { | ||
206 | int i = DP_LANE0_1_STATUS + (lane >> 1); | ||
207 | int s = (lane & 1) * 4; | ||
208 | u8 l = dp_link_status(link_status, i); | ||
209 | return (l >> s) & 0xf; | ||
210 | } | ||
211 | |||
212 | static bool dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE], | ||
213 | int lane_count) | ||
214 | { | ||
215 | int lane; | ||
216 | u8 lane_status; | ||
217 | |||
218 | for (lane = 0; lane < lane_count; lane++) { | ||
219 | lane_status = dp_get_lane_status(link_status, lane); | ||
220 | if ((lane_status & DP_LANE_CR_DONE) == 0) | ||
221 | return false; | ||
222 | } | ||
223 | return true; | ||
224 | } | ||
225 | |||
226 | static bool dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE], | ||
227 | int lane_count) | ||
228 | { | ||
229 | u8 lane_align; | ||
230 | u8 lane_status; | ||
231 | int lane; | ||
232 | |||
233 | lane_align = dp_link_status(link_status, | ||
234 | DP_LANE_ALIGN_STATUS_UPDATED); | ||
235 | if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0) | ||
236 | return false; | ||
237 | for (lane = 0; lane < lane_count; lane++) { | ||
238 | lane_status = dp_get_lane_status(link_status, lane); | ||
239 | if ((lane_status & DP_CHANNEL_EQ_BITS) != DP_CHANNEL_EQ_BITS) | ||
240 | return false; | ||
241 | } | ||
242 | return true; | ||
243 | } | ||
244 | |||
245 | static u8 dp_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE], | ||
246 | int lane) | ||
247 | |||
248 | { | ||
249 | int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1); | ||
250 | int s = ((lane & 1) ? | ||
251 | DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT : | ||
252 | DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT); | ||
253 | u8 l = dp_link_status(link_status, i); | ||
254 | |||
255 | return ((l >> s) & 0x3) << DP_TRAIN_VOLTAGE_SWING_SHIFT; | ||
256 | } | ||
257 | |||
258 | static u8 dp_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZE], | ||
259 | int lane) | ||
260 | { | ||
261 | int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1); | ||
262 | int s = ((lane & 1) ? | ||
263 | DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT : | ||
264 | DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT); | ||
265 | u8 l = dp_link_status(link_status, i); | ||
266 | |||
267 | return ((l >> s) & 0x3) << DP_TRAIN_PRE_EMPHASIS_SHIFT; | ||
268 | } | ||
269 | |||
270 | /* XXX fix me -- chip specific */ | ||
271 | #define DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_1200 | ||
272 | static u8 dp_pre_emphasis_max(u8 voltage_swing) | ||
273 | { | ||
274 | switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { | ||
275 | case DP_TRAIN_VOLTAGE_SWING_400: | ||
276 | return DP_TRAIN_PRE_EMPHASIS_6; | ||
277 | case DP_TRAIN_VOLTAGE_SWING_600: | ||
278 | return DP_TRAIN_PRE_EMPHASIS_6; | ||
279 | case DP_TRAIN_VOLTAGE_SWING_800: | ||
280 | return DP_TRAIN_PRE_EMPHASIS_3_5; | ||
281 | case DP_TRAIN_VOLTAGE_SWING_1200: | ||
282 | default: | ||
283 | return DP_TRAIN_PRE_EMPHASIS_0; | ||
284 | } | ||
285 | } | ||
286 | |||
287 | static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE], | ||
288 | int lane_count, | ||
289 | u8 train_set[4]) | ||
290 | { | ||
291 | u8 v = 0; | ||
292 | u8 p = 0; | ||
293 | int lane; | ||
294 | |||
295 | for (lane = 0; lane < lane_count; lane++) { | ||
296 | u8 this_v = dp_get_adjust_request_voltage(link_status, lane); | ||
297 | u8 this_p = dp_get_adjust_request_pre_emphasis(link_status, lane); | ||
298 | |||
299 | DRM_DEBUG("requested signal parameters: lane %d voltage %s pre_emph %s\n", | ||
300 | lane, | ||
301 | voltage_names[this_v >> DP_TRAIN_VOLTAGE_SWING_SHIFT], | ||
302 | pre_emph_names[this_p >> DP_TRAIN_PRE_EMPHASIS_SHIFT]); | ||
303 | |||
304 | if (this_v > v) | ||
305 | v = this_v; | ||
306 | if (this_p > p) | ||
307 | p = this_p; | ||
308 | } | ||
309 | |||
310 | if (v >= DP_VOLTAGE_MAX) | ||
311 | v = DP_VOLTAGE_MAX | DP_TRAIN_MAX_SWING_REACHED; | ||
312 | |||
313 | if (p >= dp_pre_emphasis_max(v)) | ||
314 | p = dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; | ||
315 | |||
316 | DRM_DEBUG("using signal parameters: voltage %s pre_emph %s\n", | ||
317 | voltage_names[(v & DP_TRAIN_VOLTAGE_SWING_MASK) >> DP_TRAIN_VOLTAGE_SWING_SHIFT], | ||
318 | pre_emph_names[(p & DP_TRAIN_PRE_EMPHASIS_MASK) >> DP_TRAIN_PRE_EMPHASIS_SHIFT]); | ||
319 | |||
320 | for (lane = 0; lane < 4; lane++) | ||
321 | train_set[lane] = v | p; | ||
322 | } | ||
323 | |||
324 | |||
325 | /* radeon aux chan functions */ | ||
326 | bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes, | ||
327 | int num_bytes, u8 *read_byte, | ||
328 | u8 read_buf_len, u8 delay) | ||
329 | { | ||
330 | struct drm_device *dev = chan->dev; | ||
331 | struct radeon_device *rdev = dev->dev_private; | ||
332 | PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION args; | ||
333 | int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction); | ||
334 | unsigned char *base; | ||
335 | |||
336 | memset(&args, 0, sizeof(args)); | ||
337 | |||
338 | base = (unsigned char *)rdev->mode_info.atom_context->scratch; | ||
339 | |||
340 | memcpy(base, req_bytes, num_bytes); | ||
341 | |||
342 | args.lpAuxRequest = 0; | ||
343 | args.lpDataOut = 16; | ||
344 | args.ucDataOutLen = 0; | ||
345 | args.ucChannelID = chan->rec.i2c_id; | ||
346 | args.ucDelay = delay / 10; | ||
347 | |||
348 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
349 | |||
350 | if (args.ucReplyStatus) { | ||
351 | DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x\n", | ||
352 | req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3], | ||
353 | chan->rec.i2c_id, args.ucReplyStatus); | ||
354 | return false; | ||
355 | } | ||
356 | |||
357 | if (args.ucDataOutLen && read_byte && read_buf_len) { | ||
358 | if (read_buf_len < args.ucDataOutLen) { | ||
359 | DRM_ERROR("Buffer to small for return answer %d %d\n", | ||
360 | read_buf_len, args.ucDataOutLen); | ||
361 | return false; | ||
362 | } | ||
363 | { | ||
364 | int len = min(read_buf_len, args.ucDataOutLen); | ||
365 | memcpy(read_byte, base + 16, len); | ||
366 | } | ||
367 | } | ||
368 | return true; | ||
369 | } | ||
370 | |||
371 | bool radeon_dp_aux_native_write(struct radeon_connector *radeon_connector, uint16_t address, | ||
372 | uint8_t send_bytes, uint8_t *send) | ||
373 | { | ||
374 | struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; | ||
375 | u8 msg[20]; | ||
376 | u8 msg_len, dp_msg_len; | ||
377 | bool ret; | ||
378 | |||
379 | dp_msg_len = 4; | ||
380 | msg[0] = address; | ||
381 | msg[1] = address >> 8; | ||
382 | msg[2] = AUX_NATIVE_WRITE << 4; | ||
383 | dp_msg_len += send_bytes; | ||
384 | msg[3] = (dp_msg_len << 4) | (send_bytes - 1); | ||
385 | |||
386 | if (send_bytes > 16) | ||
387 | return false; | ||
388 | |||
389 | memcpy(&msg[4], send, send_bytes); | ||
390 | msg_len = 4 + send_bytes; | ||
391 | ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, msg, msg_len, NULL, 0, 0); | ||
392 | return ret; | ||
393 | } | ||
394 | |||
395 | bool radeon_dp_aux_native_read(struct radeon_connector *radeon_connector, uint16_t address, | ||
396 | uint8_t delay, uint8_t expected_bytes, | ||
397 | uint8_t *read_p) | ||
398 | { | ||
399 | struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; | ||
400 | u8 msg[20]; | ||
401 | u8 msg_len, dp_msg_len; | ||
402 | bool ret = false; | ||
403 | msg_len = 4; | ||
404 | dp_msg_len = 4; | ||
405 | msg[0] = address; | ||
406 | msg[1] = address >> 8; | ||
407 | msg[2] = AUX_NATIVE_READ << 4; | ||
408 | msg[3] = (dp_msg_len) << 4; | ||
409 | msg[3] |= expected_bytes - 1; | ||
410 | |||
411 | ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, msg, msg_len, read_p, expected_bytes, delay); | ||
412 | return ret; | ||
413 | } | ||
414 | |||
415 | /* radeon dp functions */ | ||
416 | static u8 radeon_dp_encoder_service(struct radeon_device *rdev, int action, int dp_clock, | ||
417 | uint8_t ucconfig, uint8_t lane_num) | ||
418 | { | ||
419 | DP_ENCODER_SERVICE_PARAMETERS args; | ||
420 | int index = GetIndexIntoMasterTable(COMMAND, DPEncoderService); | ||
421 | |||
422 | memset(&args, 0, sizeof(args)); | ||
423 | args.ucLinkClock = dp_clock / 10; | ||
424 | args.ucConfig = ucconfig; | ||
425 | args.ucAction = action; | ||
426 | args.ucLaneNum = lane_num; | ||
427 | args.ucStatus = 0; | ||
428 | |||
429 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
430 | return args.ucStatus; | ||
431 | } | ||
432 | |||
433 | u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector) | ||
434 | { | ||
435 | struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; | ||
436 | struct drm_device *dev = radeon_connector->base.dev; | ||
437 | struct radeon_device *rdev = dev->dev_private; | ||
438 | |||
439 | return radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_GET_SINK_TYPE, 0, | ||
440 | dig_connector->dp_i2c_bus->rec.i2c_id, 0); | ||
441 | } | ||
442 | |||
443 | bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector) | ||
444 | { | ||
445 | struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; | ||
446 | u8 msg[25]; | ||
447 | int ret; | ||
448 | |||
449 | ret = radeon_dp_aux_native_read(radeon_connector, DP_DPCD_REV, 0, 8, msg); | ||
450 | if (ret) { | ||
451 | memcpy(dig_connector->dpcd, msg, 8); | ||
452 | { | ||
453 | int i; | ||
454 | DRM_DEBUG("DPCD: "); | ||
455 | for (i = 0; i < 8; i++) | ||
456 | DRM_DEBUG("%02x ", msg[i]); | ||
457 | DRM_DEBUG("\n"); | ||
458 | } | ||
459 | return true; | ||
460 | } | ||
461 | dig_connector->dpcd[0] = 0; | ||
462 | return false; | ||
463 | } | ||
464 | |||
465 | void radeon_dp_set_link_config(struct drm_connector *connector, | ||
466 | struct drm_display_mode *mode) | ||
467 | { | ||
468 | struct radeon_connector *radeon_connector; | ||
469 | struct radeon_connector_atom_dig *dig_connector; | ||
470 | |||
471 | if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) | ||
472 | return; | ||
473 | |||
474 | radeon_connector = to_radeon_connector(connector); | ||
475 | if (!radeon_connector->con_priv) | ||
476 | return; | ||
477 | dig_connector = radeon_connector->con_priv; | ||
478 | |||
479 | dig_connector->dp_clock = | ||
480 | dp_link_clock_for_mode_clock(dig_connector->dpcd, mode->clock); | ||
481 | dig_connector->dp_lane_count = | ||
482 | dp_lanes_for_mode_clock(dig_connector->dpcd, mode->clock); | ||
483 | } | ||
484 | |||
485 | int radeon_dp_mode_valid_helper(struct radeon_connector *radeon_connector, | ||
486 | struct drm_display_mode *mode) | ||
487 | { | ||
488 | struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; | ||
489 | |||
490 | return dp_mode_valid(dig_connector->dpcd, mode->clock); | ||
491 | } | ||
492 | |||
493 | static bool atom_dp_get_link_status(struct radeon_connector *radeon_connector, | ||
494 | u8 link_status[DP_LINK_STATUS_SIZE]) | ||
495 | { | ||
496 | int ret; | ||
497 | ret = radeon_dp_aux_native_read(radeon_connector, DP_LANE0_1_STATUS, 100, | ||
498 | DP_LINK_STATUS_SIZE, link_status); | ||
499 | if (!ret) { | ||
500 | DRM_ERROR("displayport link status failed\n"); | ||
501 | return false; | ||
502 | } | ||
503 | |||
504 | DRM_DEBUG("link status %02x %02x %02x %02x %02x %02x\n", | ||
505 | link_status[0], link_status[1], link_status[2], | ||
506 | link_status[3], link_status[4], link_status[5]); | ||
507 | return true; | ||
508 | } | ||
509 | |||
510 | bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector) | ||
511 | { | ||
512 | struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; | ||
513 | u8 link_status[DP_LINK_STATUS_SIZE]; | ||
514 | |||
515 | if (!atom_dp_get_link_status(radeon_connector, link_status)) | ||
516 | return false; | ||
517 | if (dp_channel_eq_ok(link_status, dig_connector->dp_lane_count)) | ||
518 | return false; | ||
519 | return true; | ||
520 | } | ||
521 | |||
522 | static void dp_set_power(struct radeon_connector *radeon_connector, u8 power_state) | ||
523 | { | ||
524 | struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; | ||
525 | |||
526 | if (dig_connector->dpcd[0] >= 0x11) { | ||
527 | radeon_dp_aux_native_write(radeon_connector, DP_SET_POWER, 1, | ||
528 | &power_state); | ||
529 | } | ||
530 | } | ||
531 | |||
532 | static void dp_set_downspread(struct radeon_connector *radeon_connector, u8 downspread) | ||
533 | { | ||
534 | radeon_dp_aux_native_write(radeon_connector, DP_DOWNSPREAD_CTRL, 1, | ||
535 | &downspread); | ||
536 | } | ||
537 | |||
538 | static void dp_set_link_bw_lanes(struct radeon_connector *radeon_connector, | ||
539 | u8 link_configuration[DP_LINK_CONFIGURATION_SIZE]) | ||
540 | { | ||
541 | radeon_dp_aux_native_write(radeon_connector, DP_LINK_BW_SET, 2, | ||
542 | link_configuration); | ||
543 | } | ||
544 | |||
545 | static void dp_update_dpvs_emph(struct radeon_connector *radeon_connector, | ||
546 | struct drm_encoder *encoder, | ||
547 | u8 train_set[4]) | ||
548 | { | ||
549 | struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; | ||
550 | int i; | ||
551 | |||
552 | for (i = 0; i < dig_connector->dp_lane_count; i++) | ||
553 | atombios_dig_transmitter_setup(encoder, | ||
554 | ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH, | ||
555 | i, train_set[i]); | ||
556 | |||
557 | radeon_dp_aux_native_write(radeon_connector, DP_TRAINING_LANE0_SET, | ||
558 | dig_connector->dp_lane_count, train_set); | ||
559 | } | ||
560 | |||
561 | static void dp_set_training(struct radeon_connector *radeon_connector, | ||
562 | u8 training) | ||
563 | { | ||
564 | radeon_dp_aux_native_write(radeon_connector, DP_TRAINING_PATTERN_SET, | ||
565 | 1, &training); | ||
566 | } | ||
567 | |||
568 | void dp_link_train(struct drm_encoder *encoder, | ||
569 | struct drm_connector *connector) | ||
570 | { | ||
571 | struct drm_device *dev = encoder->dev; | ||
572 | struct radeon_device *rdev = dev->dev_private; | ||
573 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
574 | struct radeon_encoder_atom_dig *dig; | ||
575 | struct radeon_connector *radeon_connector; | ||
576 | struct radeon_connector_atom_dig *dig_connector; | ||
577 | int enc_id = 0; | ||
578 | bool clock_recovery, channel_eq; | ||
579 | u8 link_status[DP_LINK_STATUS_SIZE]; | ||
580 | u8 link_configuration[DP_LINK_CONFIGURATION_SIZE]; | ||
581 | u8 tries, voltage; | ||
582 | u8 train_set[4]; | ||
583 | int i; | ||
584 | |||
585 | if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) | ||
586 | return; | ||
587 | |||
588 | if (!radeon_encoder->enc_priv) | ||
589 | return; | ||
590 | dig = radeon_encoder->enc_priv; | ||
591 | |||
592 | radeon_connector = to_radeon_connector(connector); | ||
593 | if (!radeon_connector->con_priv) | ||
594 | return; | ||
595 | dig_connector = radeon_connector->con_priv; | ||
596 | |||
597 | if (ASIC_IS_DCE32(rdev)) { | ||
598 | if (dig->dig_block) | ||
599 | enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER; | ||
600 | else | ||
601 | enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER; | ||
602 | if (dig_connector->linkb) | ||
603 | enc_id |= ATOM_DP_CONFIG_LINK_B; | ||
604 | else | ||
605 | enc_id |= ATOM_DP_CONFIG_LINK_A; | ||
606 | } else { | ||
607 | if (dig_connector->linkb) | ||
608 | enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER | ATOM_DP_CONFIG_LINK_B; | ||
609 | else | ||
610 | enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER | ATOM_DP_CONFIG_LINK_A; | ||
611 | } | ||
612 | |||
613 | memset(link_configuration, 0, DP_LINK_CONFIGURATION_SIZE); | ||
614 | if (dig_connector->dp_clock == 270000) | ||
615 | link_configuration[0] = DP_LINK_BW_2_7; | ||
616 | else | ||
617 | link_configuration[0] = DP_LINK_BW_1_62; | ||
618 | link_configuration[1] = dig_connector->dp_lane_count; | ||
619 | if (dig_connector->dpcd[0] >= 0x11) | ||
620 | link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; | ||
621 | |||
622 | /* power up the sink */ | ||
623 | dp_set_power(radeon_connector, DP_SET_POWER_D0); | ||
624 | /* disable the training pattern on the sink */ | ||
625 | dp_set_training(radeon_connector, DP_TRAINING_PATTERN_DISABLE); | ||
626 | /* set link bw and lanes on the sink */ | ||
627 | dp_set_link_bw_lanes(radeon_connector, link_configuration); | ||
628 | /* disable downspread on the sink */ | ||
629 | dp_set_downspread(radeon_connector, 0); | ||
630 | /* start training on the source */ | ||
631 | radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_START, | ||
632 | dig_connector->dp_clock, enc_id, 0); | ||
633 | /* set training pattern 1 on the source */ | ||
634 | radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL, | ||
635 | dig_connector->dp_clock, enc_id, 0); | ||
636 | |||
637 | /* set initial vs/emph */ | ||
638 | memset(train_set, 0, 4); | ||
639 | udelay(400); | ||
640 | /* set training pattern 1 on the sink */ | ||
641 | dp_set_training(radeon_connector, DP_TRAINING_PATTERN_1); | ||
642 | |||
643 | dp_update_dpvs_emph(radeon_connector, encoder, train_set); | ||
644 | |||
645 | /* clock recovery loop */ | ||
646 | clock_recovery = false; | ||
647 | tries = 0; | ||
648 | voltage = 0xff; | ||
649 | for (;;) { | ||
650 | udelay(100); | ||
651 | if (!atom_dp_get_link_status(radeon_connector, link_status)) | ||
652 | break; | ||
653 | |||
654 | if (dp_clock_recovery_ok(link_status, dig_connector->dp_lane_count)) { | ||
655 | clock_recovery = true; | ||
656 | break; | ||
657 | } | ||
658 | |||
659 | for (i = 0; i < dig_connector->dp_lane_count; i++) { | ||
660 | if ((train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) | ||
661 | break; | ||
662 | } | ||
663 | if (i == dig_connector->dp_lane_count) { | ||
664 | DRM_ERROR("clock recovery reached max voltage\n"); | ||
665 | break; | ||
666 | } | ||
667 | |||
668 | if ((train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { | ||
669 | ++tries; | ||
670 | if (tries == 5) { | ||
671 | DRM_ERROR("clock recovery tried 5 times\n"); | ||
672 | break; | ||
673 | } | ||
674 | } else | ||
675 | tries = 0; | ||
676 | |||
677 | voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; | ||
678 | |||
679 | /* Compute new train_set as requested by sink */ | ||
680 | dp_get_adjust_train(link_status, dig_connector->dp_lane_count, train_set); | ||
681 | dp_update_dpvs_emph(radeon_connector, encoder, train_set); | ||
682 | } | ||
683 | if (!clock_recovery) | ||
684 | DRM_ERROR("clock recovery failed\n"); | ||
685 | else | ||
686 | DRM_DEBUG("clock recovery at voltage %d pre-emphasis %d\n", | ||
687 | train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK, | ||
688 | (train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK) >> | ||
689 | DP_TRAIN_PRE_EMPHASIS_SHIFT); | ||
690 | |||
691 | |||
692 | /* set training pattern 2 on the sink */ | ||
693 | dp_set_training(radeon_connector, DP_TRAINING_PATTERN_2); | ||
694 | /* set training pattern 2 on the source */ | ||
695 | radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL, | ||
696 | dig_connector->dp_clock, enc_id, 1); | ||
697 | |||
698 | /* channel equalization loop */ | ||
699 | tries = 0; | ||
700 | channel_eq = false; | ||
701 | for (;;) { | ||
702 | udelay(400); | ||
703 | if (!atom_dp_get_link_status(radeon_connector, link_status)) | ||
704 | break; | ||
705 | |||
706 | if (dp_channel_eq_ok(link_status, dig_connector->dp_lane_count)) { | ||
707 | channel_eq = true; | ||
708 | break; | ||
709 | } | ||
710 | |||
711 | /* Try 5 times */ | ||
712 | if (tries > 5) { | ||
713 | DRM_ERROR("channel eq failed: 5 tries\n"); | ||
714 | break; | ||
715 | } | ||
716 | |||
717 | /* Compute new train_set as requested by sink */ | ||
718 | dp_get_adjust_train(link_status, dig_connector->dp_lane_count, train_set); | ||
719 | dp_update_dpvs_emph(radeon_connector, encoder, train_set); | ||
720 | |||
721 | tries++; | ||
722 | } | ||
723 | |||
724 | if (!channel_eq) | ||
725 | DRM_ERROR("channel eq failed\n"); | ||
726 | else | ||
727 | DRM_DEBUG("channel eq at voltage %d pre-emphasis %d\n", | ||
728 | train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK, | ||
729 | (train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK) | ||
730 | >> DP_TRAIN_PRE_EMPHASIS_SHIFT); | ||
731 | |||
732 | /* disable the training pattern on the sink */ | ||
733 | dp_set_training(radeon_connector, DP_TRAINING_PATTERN_DISABLE); | ||
734 | |||
735 | radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE, | ||
736 | dig_connector->dp_clock, enc_id, 0); | ||
737 | } | ||
738 | |||
739 | int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, | ||
740 | uint8_t write_byte, uint8_t *read_byte) | ||
741 | { | ||
742 | struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; | ||
743 | struct radeon_i2c_chan *auxch = (struct radeon_i2c_chan *)adapter; | ||
744 | int ret = 0; | ||
745 | uint16_t address = algo_data->address; | ||
746 | uint8_t msg[5]; | ||
747 | uint8_t reply[2]; | ||
748 | int msg_len, dp_msg_len; | ||
749 | int reply_bytes; | ||
750 | |||
751 | /* Set up the command byte */ | ||
752 | if (mode & MODE_I2C_READ) | ||
753 | msg[2] = AUX_I2C_READ << 4; | ||
754 | else | ||
755 | msg[2] = AUX_I2C_WRITE << 4; | ||
756 | |||
757 | if (!(mode & MODE_I2C_STOP)) | ||
758 | msg[2] |= AUX_I2C_MOT << 4; | ||
759 | |||
760 | msg[0] = address; | ||
761 | msg[1] = address >> 8; | ||
762 | |||
763 | reply_bytes = 1; | ||
764 | |||
765 | msg_len = 4; | ||
766 | dp_msg_len = 3; | ||
767 | switch (mode) { | ||
768 | case MODE_I2C_WRITE: | ||
769 | msg[4] = write_byte; | ||
770 | msg_len++; | ||
771 | dp_msg_len += 2; | ||
772 | break; | ||
773 | case MODE_I2C_READ: | ||
774 | dp_msg_len += 1; | ||
775 | break; | ||
776 | default: | ||
777 | break; | ||
778 | } | ||
779 | |||
780 | msg[3] = (dp_msg_len) << 4; | ||
781 | ret = radeon_process_aux_ch(auxch, msg, msg_len, reply, reply_bytes, 0); | ||
782 | |||
783 | if (ret) { | ||
784 | if (read_byte) | ||
785 | *read_byte = reply[0]; | ||
786 | return reply_bytes; | ||
787 | } | ||
788 | return -EREMOTEIO; | ||
789 | } | ||
790 | |||
diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c index fb211e585dea..0d79577c1576 100644 --- a/drivers/gpu/drm/radeon/mkregtable.c +++ b/drivers/gpu/drm/radeon/mkregtable.c | |||
@@ -561,7 +561,7 @@ struct table { | |||
561 | char *gpu_prefix; | 561 | char *gpu_prefix; |
562 | }; | 562 | }; |
563 | 563 | ||
564 | struct offset *offset_new(unsigned o) | 564 | static struct offset *offset_new(unsigned o) |
565 | { | 565 | { |
566 | struct offset *offset; | 566 | struct offset *offset; |
567 | 567 | ||
@@ -573,12 +573,12 @@ struct offset *offset_new(unsigned o) | |||
573 | return offset; | 573 | return offset; |
574 | } | 574 | } |
575 | 575 | ||
576 | void table_offset_add(struct table *t, struct offset *offset) | 576 | static void table_offset_add(struct table *t, struct offset *offset) |
577 | { | 577 | { |
578 | list_add_tail(&offset->list, &t->offsets); | 578 | list_add_tail(&offset->list, &t->offsets); |
579 | } | 579 | } |
580 | 580 | ||
581 | void table_init(struct table *t) | 581 | static void table_init(struct table *t) |
582 | { | 582 | { |
583 | INIT_LIST_HEAD(&t->offsets); | 583 | INIT_LIST_HEAD(&t->offsets); |
584 | t->offset_max = 0; | 584 | t->offset_max = 0; |
@@ -586,7 +586,7 @@ void table_init(struct table *t) | |||
586 | t->table = NULL; | 586 | t->table = NULL; |
587 | } | 587 | } |
588 | 588 | ||
589 | void table_print(struct table *t) | 589 | static void table_print(struct table *t) |
590 | { | 590 | { |
591 | unsigned nlloop, i, j, n, c, id; | 591 | unsigned nlloop, i, j, n, c, id; |
592 | 592 | ||
@@ -611,7 +611,7 @@ void table_print(struct table *t) | |||
611 | printf("};\n"); | 611 | printf("};\n"); |
612 | } | 612 | } |
613 | 613 | ||
614 | int table_build(struct table *t) | 614 | static int table_build(struct table *t) |
615 | { | 615 | { |
616 | struct offset *offset; | 616 | struct offset *offset; |
617 | unsigned i, m; | 617 | unsigned i, m; |
@@ -631,7 +631,7 @@ int table_build(struct table *t) | |||
631 | } | 631 | } |
632 | 632 | ||
633 | static char gpu_name[10]; | 633 | static char gpu_name[10]; |
634 | int parser_auth(struct table *t, const char *filename) | 634 | static int parser_auth(struct table *t, const char *filename) |
635 | { | 635 | { |
636 | FILE *file; | 636 | FILE *file; |
637 | regex_t mask_rex; | 637 | regex_t mask_rex; |
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index c9e93eabcf16..b7baf16c11d7 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c | |||
@@ -65,6 +65,95 @@ MODULE_FIRMWARE(FIRMWARE_R520); | |||
65 | * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 | 65 | * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 |
66 | */ | 66 | */ |
67 | 67 | ||
68 | /* hpd for digital panel detect/disconnect */ | ||
69 | bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd) | ||
70 | { | ||
71 | bool connected = false; | ||
72 | |||
73 | switch (hpd) { | ||
74 | case RADEON_HPD_1: | ||
75 | if (RREG32(RADEON_FP_GEN_CNTL) & RADEON_FP_DETECT_SENSE) | ||
76 | connected = true; | ||
77 | break; | ||
78 | case RADEON_HPD_2: | ||
79 | if (RREG32(RADEON_FP2_GEN_CNTL) & RADEON_FP2_DETECT_SENSE) | ||
80 | connected = true; | ||
81 | break; | ||
82 | default: | ||
83 | break; | ||
84 | } | ||
85 | return connected; | ||
86 | } | ||
87 | |||
88 | void r100_hpd_set_polarity(struct radeon_device *rdev, | ||
89 | enum radeon_hpd_id hpd) | ||
90 | { | ||
91 | u32 tmp; | ||
92 | bool connected = r100_hpd_sense(rdev, hpd); | ||
93 | |||
94 | switch (hpd) { | ||
95 | case RADEON_HPD_1: | ||
96 | tmp = RREG32(RADEON_FP_GEN_CNTL); | ||
97 | if (connected) | ||
98 | tmp &= ~RADEON_FP_DETECT_INT_POL; | ||
99 | else | ||
100 | tmp |= RADEON_FP_DETECT_INT_POL; | ||
101 | WREG32(RADEON_FP_GEN_CNTL, tmp); | ||
102 | break; | ||
103 | case RADEON_HPD_2: | ||
104 | tmp = RREG32(RADEON_FP2_GEN_CNTL); | ||
105 | if (connected) | ||
106 | tmp &= ~RADEON_FP2_DETECT_INT_POL; | ||
107 | else | ||
108 | tmp |= RADEON_FP2_DETECT_INT_POL; | ||
109 | WREG32(RADEON_FP2_GEN_CNTL, tmp); | ||
110 | break; | ||
111 | default: | ||
112 | break; | ||
113 | } | ||
114 | } | ||
115 | |||
116 | void r100_hpd_init(struct radeon_device *rdev) | ||
117 | { | ||
118 | struct drm_device *dev = rdev->ddev; | ||
119 | struct drm_connector *connector; | ||
120 | |||
121 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
122 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | ||
123 | switch (radeon_connector->hpd.hpd) { | ||
124 | case RADEON_HPD_1: | ||
125 | rdev->irq.hpd[0] = true; | ||
126 | break; | ||
127 | case RADEON_HPD_2: | ||
128 | rdev->irq.hpd[1] = true; | ||
129 | break; | ||
130 | default: | ||
131 | break; | ||
132 | } | ||
133 | } | ||
134 | r100_irq_set(rdev); | ||
135 | } | ||
136 | |||
137 | void r100_hpd_fini(struct radeon_device *rdev) | ||
138 | { | ||
139 | struct drm_device *dev = rdev->ddev; | ||
140 | struct drm_connector *connector; | ||
141 | |||
142 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
143 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | ||
144 | switch (radeon_connector->hpd.hpd) { | ||
145 | case RADEON_HPD_1: | ||
146 | rdev->irq.hpd[0] = false; | ||
147 | break; | ||
148 | case RADEON_HPD_2: | ||
149 | rdev->irq.hpd[1] = false; | ||
150 | break; | ||
151 | default: | ||
152 | break; | ||
153 | } | ||
154 | } | ||
155 | } | ||
156 | |||
68 | /* | 157 | /* |
69 | * PCI GART | 158 | * PCI GART |
70 | */ | 159 | */ |
@@ -94,6 +183,15 @@ int r100_pci_gart_init(struct radeon_device *rdev) | |||
94 | return radeon_gart_table_ram_alloc(rdev); | 183 | return radeon_gart_table_ram_alloc(rdev); |
95 | } | 184 | } |
96 | 185 | ||
186 | /* required on r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */ | ||
187 | void r100_enable_bm(struct radeon_device *rdev) | ||
188 | { | ||
189 | uint32_t tmp; | ||
190 | /* Enable bus mastering */ | ||
191 | tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS; | ||
192 | WREG32(RADEON_BUS_CNTL, tmp); | ||
193 | } | ||
194 | |||
97 | int r100_pci_gart_enable(struct radeon_device *rdev) | 195 | int r100_pci_gart_enable(struct radeon_device *rdev) |
98 | { | 196 | { |
99 | uint32_t tmp; | 197 | uint32_t tmp; |
@@ -105,9 +203,6 @@ int r100_pci_gart_enable(struct radeon_device *rdev) | |||
105 | WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_location); | 203 | WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_location); |
106 | tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; | 204 | tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; |
107 | WREG32(RADEON_AIC_HI_ADDR, tmp); | 205 | WREG32(RADEON_AIC_HI_ADDR, tmp); |
108 | /* Enable bus mastering */ | ||
109 | tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS; | ||
110 | WREG32(RADEON_BUS_CNTL, tmp); | ||
111 | /* set PCI GART page-table base address */ | 206 | /* set PCI GART page-table base address */ |
112 | WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr); | 207 | WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr); |
113 | tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN; | 208 | tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN; |
@@ -157,6 +252,12 @@ int r100_irq_set(struct radeon_device *rdev) | |||
157 | if (rdev->irq.crtc_vblank_int[1]) { | 252 | if (rdev->irq.crtc_vblank_int[1]) { |
158 | tmp |= RADEON_CRTC2_VBLANK_MASK; | 253 | tmp |= RADEON_CRTC2_VBLANK_MASK; |
159 | } | 254 | } |
255 | if (rdev->irq.hpd[0]) { | ||
256 | tmp |= RADEON_FP_DETECT_MASK; | ||
257 | } | ||
258 | if (rdev->irq.hpd[1]) { | ||
259 | tmp |= RADEON_FP2_DETECT_MASK; | ||
260 | } | ||
160 | WREG32(RADEON_GEN_INT_CNTL, tmp); | 261 | WREG32(RADEON_GEN_INT_CNTL, tmp); |
161 | return 0; | 262 | return 0; |
162 | } | 263 | } |
@@ -175,8 +276,9 @@ void r100_irq_disable(struct radeon_device *rdev) | |||
175 | static inline uint32_t r100_irq_ack(struct radeon_device *rdev) | 276 | static inline uint32_t r100_irq_ack(struct radeon_device *rdev) |
176 | { | 277 | { |
177 | uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS); | 278 | uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS); |
178 | uint32_t irq_mask = RADEON_SW_INT_TEST | RADEON_CRTC_VBLANK_STAT | | 279 | uint32_t irq_mask = RADEON_SW_INT_TEST | |
179 | RADEON_CRTC2_VBLANK_STAT; | 280 | RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT | |
281 | RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT; | ||
180 | 282 | ||
181 | if (irqs) { | 283 | if (irqs) { |
182 | WREG32(RADEON_GEN_INT_STATUS, irqs); | 284 | WREG32(RADEON_GEN_INT_STATUS, irqs); |
@@ -187,6 +289,7 @@ static inline uint32_t r100_irq_ack(struct radeon_device *rdev) | |||
187 | int r100_irq_process(struct radeon_device *rdev) | 289 | int r100_irq_process(struct radeon_device *rdev) |
188 | { | 290 | { |
189 | uint32_t status, msi_rearm; | 291 | uint32_t status, msi_rearm; |
292 | bool queue_hotplug = false; | ||
190 | 293 | ||
191 | status = r100_irq_ack(rdev); | 294 | status = r100_irq_ack(rdev); |
192 | if (!status) { | 295 | if (!status) { |
@@ -207,8 +310,18 @@ int r100_irq_process(struct radeon_device *rdev) | |||
207 | if (status & RADEON_CRTC2_VBLANK_STAT) { | 310 | if (status & RADEON_CRTC2_VBLANK_STAT) { |
208 | drm_handle_vblank(rdev->ddev, 1); | 311 | drm_handle_vblank(rdev->ddev, 1); |
209 | } | 312 | } |
313 | if (status & RADEON_FP_DETECT_STAT) { | ||
314 | queue_hotplug = true; | ||
315 | DRM_DEBUG("HPD1\n"); | ||
316 | } | ||
317 | if (status & RADEON_FP2_DETECT_STAT) { | ||
318 | queue_hotplug = true; | ||
319 | DRM_DEBUG("HPD2\n"); | ||
320 | } | ||
210 | status = r100_irq_ack(rdev); | 321 | status = r100_irq_ack(rdev); |
211 | } | 322 | } |
323 | if (queue_hotplug) | ||
324 | queue_work(rdev->wq, &rdev->hotplug_work); | ||
212 | if (rdev->msi_enabled) { | 325 | if (rdev->msi_enabled) { |
213 | switch (rdev->family) { | 326 | switch (rdev->family) { |
214 | case CHIP_RS400: | 327 | case CHIP_RS400: |
@@ -255,24 +368,27 @@ int r100_wb_init(struct radeon_device *rdev) | |||
255 | int r; | 368 | int r; |
256 | 369 | ||
257 | if (rdev->wb.wb_obj == NULL) { | 370 | if (rdev->wb.wb_obj == NULL) { |
258 | r = radeon_object_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, | 371 | r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true, |
259 | true, | 372 | RADEON_GEM_DOMAIN_GTT, |
260 | RADEON_GEM_DOMAIN_GTT, | 373 | &rdev->wb.wb_obj); |
261 | false, &rdev->wb.wb_obj); | ||
262 | if (r) { | 374 | if (r) { |
263 | DRM_ERROR("radeon: failed to create WB buffer (%d).\n", r); | 375 | dev_err(rdev->dev, "(%d) create WB buffer failed\n", r); |
264 | return r; | 376 | return r; |
265 | } | 377 | } |
266 | r = radeon_object_pin(rdev->wb.wb_obj, | 378 | r = radeon_bo_reserve(rdev->wb.wb_obj, false); |
267 | RADEON_GEM_DOMAIN_GTT, | 379 | if (unlikely(r != 0)) |
268 | &rdev->wb.gpu_addr); | 380 | return r; |
381 | r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT, | ||
382 | &rdev->wb.gpu_addr); | ||
269 | if (r) { | 383 | if (r) { |
270 | DRM_ERROR("radeon: failed to pin WB buffer (%d).\n", r); | 384 | dev_err(rdev->dev, "(%d) pin WB buffer failed\n", r); |
385 | radeon_bo_unreserve(rdev->wb.wb_obj); | ||
271 | return r; | 386 | return r; |
272 | } | 387 | } |
273 | r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb); | 388 | r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb); |
389 | radeon_bo_unreserve(rdev->wb.wb_obj); | ||
274 | if (r) { | 390 | if (r) { |
275 | DRM_ERROR("radeon: failed to map WB buffer (%d).\n", r); | 391 | dev_err(rdev->dev, "(%d) map WB buffer failed\n", r); |
276 | return r; | 392 | return r; |
277 | } | 393 | } |
278 | } | 394 | } |
@@ -290,11 +406,19 @@ void r100_wb_disable(struct radeon_device *rdev) | |||
290 | 406 | ||
291 | void r100_wb_fini(struct radeon_device *rdev) | 407 | void r100_wb_fini(struct radeon_device *rdev) |
292 | { | 408 | { |
409 | int r; | ||
410 | |||
293 | r100_wb_disable(rdev); | 411 | r100_wb_disable(rdev); |
294 | if (rdev->wb.wb_obj) { | 412 | if (rdev->wb.wb_obj) { |
295 | radeon_object_kunmap(rdev->wb.wb_obj); | 413 | r = radeon_bo_reserve(rdev->wb.wb_obj, false); |
296 | radeon_object_unpin(rdev->wb.wb_obj); | 414 | if (unlikely(r != 0)) { |
297 | radeon_object_unref(&rdev->wb.wb_obj); | 415 | dev_err(rdev->dev, "(%d) can't finish WB\n", r); |
416 | return; | ||
417 | } | ||
418 | radeon_bo_kunmap(rdev->wb.wb_obj); | ||
419 | radeon_bo_unpin(rdev->wb.wb_obj); | ||
420 | radeon_bo_unreserve(rdev->wb.wb_obj); | ||
421 | radeon_bo_unref(&rdev->wb.wb_obj); | ||
298 | rdev->wb.wb = NULL; | 422 | rdev->wb.wb = NULL; |
299 | rdev->wb.wb_obj = NULL; | 423 | rdev->wb.wb_obj = NULL; |
300 | } | 424 | } |
@@ -1288,17 +1412,17 @@ static int r100_packet0_check(struct radeon_cs_parser *p, | |||
1288 | 1412 | ||
1289 | int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p, | 1413 | int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p, |
1290 | struct radeon_cs_packet *pkt, | 1414 | struct radeon_cs_packet *pkt, |
1291 | struct radeon_object *robj) | 1415 | struct radeon_bo *robj) |
1292 | { | 1416 | { |
1293 | unsigned idx; | 1417 | unsigned idx; |
1294 | u32 value; | 1418 | u32 value; |
1295 | idx = pkt->idx + 1; | 1419 | idx = pkt->idx + 1; |
1296 | value = radeon_get_ib_value(p, idx + 2); | 1420 | value = radeon_get_ib_value(p, idx + 2); |
1297 | if ((value + 1) > radeon_object_size(robj)) { | 1421 | if ((value + 1) > radeon_bo_size(robj)) { |
1298 | DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER " | 1422 | DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER " |
1299 | "(need %u have %lu) !\n", | 1423 | "(need %u have %lu) !\n", |
1300 | value + 1, | 1424 | value + 1, |
1301 | radeon_object_size(robj)); | 1425 | radeon_bo_size(robj)); |
1302 | return -EINVAL; | 1426 | return -EINVAL; |
1303 | } | 1427 | } |
1304 | return 0; | 1428 | return 0; |
@@ -1583,6 +1707,14 @@ void r100_gpu_init(struct radeon_device *rdev) | |||
1583 | r100_hdp_reset(rdev); | 1707 | r100_hdp_reset(rdev); |
1584 | } | 1708 | } |
1585 | 1709 | ||
1710 | void r100_hdp_flush(struct radeon_device *rdev) | ||
1711 | { | ||
1712 | u32 tmp; | ||
1713 | tmp = RREG32(RADEON_HOST_PATH_CNTL); | ||
1714 | tmp |= RADEON_HDP_READ_BUFFER_INVALIDATE; | ||
1715 | WREG32(RADEON_HOST_PATH_CNTL, tmp); | ||
1716 | } | ||
1717 | |||
1586 | void r100_hdp_reset(struct radeon_device *rdev) | 1718 | void r100_hdp_reset(struct radeon_device *rdev) |
1587 | { | 1719 | { |
1588 | uint32_t tmp; | 1720 | uint32_t tmp; |
@@ -1650,6 +1782,17 @@ int r100_gpu_reset(struct radeon_device *rdev) | |||
1650 | return 0; | 1782 | return 0; |
1651 | } | 1783 | } |
1652 | 1784 | ||
1785 | void r100_set_common_regs(struct radeon_device *rdev) | ||
1786 | { | ||
1787 | /* set these so they don't interfere with anything */ | ||
1788 | WREG32(RADEON_OV0_SCALE_CNTL, 0); | ||
1789 | WREG32(RADEON_SUBPIC_CNTL, 0); | ||
1790 | WREG32(RADEON_VIPH_CONTROL, 0); | ||
1791 | WREG32(RADEON_I2C_CNTL_1, 0); | ||
1792 | WREG32(RADEON_DVI_I2C_CNTL_1, 0); | ||
1793 | WREG32(RADEON_CAP0_TRIG_CNTL, 0); | ||
1794 | WREG32(RADEON_CAP1_TRIG_CNTL, 0); | ||
1795 | } | ||
1653 | 1796 | ||
1654 | /* | 1797 | /* |
1655 | * VRAM info | 1798 | * VRAM info |
@@ -2594,7 +2737,7 @@ static int r100_cs_track_cube(struct radeon_device *rdev, | |||
2594 | struct r100_cs_track *track, unsigned idx) | 2737 | struct r100_cs_track *track, unsigned idx) |
2595 | { | 2738 | { |
2596 | unsigned face, w, h; | 2739 | unsigned face, w, h; |
2597 | struct radeon_object *cube_robj; | 2740 | struct radeon_bo *cube_robj; |
2598 | unsigned long size; | 2741 | unsigned long size; |
2599 | 2742 | ||
2600 | for (face = 0; face < 5; face++) { | 2743 | for (face = 0; face < 5; face++) { |
@@ -2607,9 +2750,9 @@ static int r100_cs_track_cube(struct radeon_device *rdev, | |||
2607 | 2750 | ||
2608 | size += track->textures[idx].cube_info[face].offset; | 2751 | size += track->textures[idx].cube_info[face].offset; |
2609 | 2752 | ||
2610 | if (size > radeon_object_size(cube_robj)) { | 2753 | if (size > radeon_bo_size(cube_robj)) { |
2611 | DRM_ERROR("Cube texture offset greater than object size %lu %lu\n", | 2754 | DRM_ERROR("Cube texture offset greater than object size %lu %lu\n", |
2612 | size, radeon_object_size(cube_robj)); | 2755 | size, radeon_bo_size(cube_robj)); |
2613 | r100_cs_track_texture_print(&track->textures[idx]); | 2756 | r100_cs_track_texture_print(&track->textures[idx]); |
2614 | return -1; | 2757 | return -1; |
2615 | } | 2758 | } |
@@ -2620,7 +2763,7 @@ static int r100_cs_track_cube(struct radeon_device *rdev, | |||
2620 | static int r100_cs_track_texture_check(struct radeon_device *rdev, | 2763 | static int r100_cs_track_texture_check(struct radeon_device *rdev, |
2621 | struct r100_cs_track *track) | 2764 | struct r100_cs_track *track) |
2622 | { | 2765 | { |
2623 | struct radeon_object *robj; | 2766 | struct radeon_bo *robj; |
2624 | unsigned long size; | 2767 | unsigned long size; |
2625 | unsigned u, i, w, h; | 2768 | unsigned u, i, w, h; |
2626 | int ret; | 2769 | int ret; |
@@ -2676,9 +2819,9 @@ static int r100_cs_track_texture_check(struct radeon_device *rdev, | |||
2676 | "%u\n", track->textures[u].tex_coord_type, u); | 2819 | "%u\n", track->textures[u].tex_coord_type, u); |
2677 | return -EINVAL; | 2820 | return -EINVAL; |
2678 | } | 2821 | } |
2679 | if (size > radeon_object_size(robj)) { | 2822 | if (size > radeon_bo_size(robj)) { |
2680 | DRM_ERROR("Texture of unit %u needs %lu bytes but is " | 2823 | DRM_ERROR("Texture of unit %u needs %lu bytes but is " |
2681 | "%lu\n", u, size, radeon_object_size(robj)); | 2824 | "%lu\n", u, size, radeon_bo_size(robj)); |
2682 | r100_cs_track_texture_print(&track->textures[u]); | 2825 | r100_cs_track_texture_print(&track->textures[u]); |
2683 | return -EINVAL; | 2826 | return -EINVAL; |
2684 | } | 2827 | } |
@@ -2700,10 +2843,10 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track) | |||
2700 | } | 2843 | } |
2701 | size = track->cb[i].pitch * track->cb[i].cpp * track->maxy; | 2844 | size = track->cb[i].pitch * track->cb[i].cpp * track->maxy; |
2702 | size += track->cb[i].offset; | 2845 | size += track->cb[i].offset; |
2703 | if (size > radeon_object_size(track->cb[i].robj)) { | 2846 | if (size > radeon_bo_size(track->cb[i].robj)) { |
2704 | DRM_ERROR("[drm] Buffer too small for color buffer %d " | 2847 | DRM_ERROR("[drm] Buffer too small for color buffer %d " |
2705 | "(need %lu have %lu) !\n", i, size, | 2848 | "(need %lu have %lu) !\n", i, size, |
2706 | radeon_object_size(track->cb[i].robj)); | 2849 | radeon_bo_size(track->cb[i].robj)); |
2707 | DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n", | 2850 | DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n", |
2708 | i, track->cb[i].pitch, track->cb[i].cpp, | 2851 | i, track->cb[i].pitch, track->cb[i].cpp, |
2709 | track->cb[i].offset, track->maxy); | 2852 | track->cb[i].offset, track->maxy); |
@@ -2717,10 +2860,10 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track) | |||
2717 | } | 2860 | } |
2718 | size = track->zb.pitch * track->zb.cpp * track->maxy; | 2861 | size = track->zb.pitch * track->zb.cpp * track->maxy; |
2719 | size += track->zb.offset; | 2862 | size += track->zb.offset; |
2720 | if (size > radeon_object_size(track->zb.robj)) { | 2863 | if (size > radeon_bo_size(track->zb.robj)) { |
2721 | DRM_ERROR("[drm] Buffer too small for z buffer " | 2864 | DRM_ERROR("[drm] Buffer too small for z buffer " |
2722 | "(need %lu have %lu) !\n", size, | 2865 | "(need %lu have %lu) !\n", size, |
2723 | radeon_object_size(track->zb.robj)); | 2866 | radeon_bo_size(track->zb.robj)); |
2724 | DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n", | 2867 | DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n", |
2725 | track->zb.pitch, track->zb.cpp, | 2868 | track->zb.pitch, track->zb.cpp, |
2726 | track->zb.offset, track->maxy); | 2869 | track->zb.offset, track->maxy); |
@@ -2738,11 +2881,12 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track) | |||
2738 | "bound\n", prim_walk, i); | 2881 | "bound\n", prim_walk, i); |
2739 | return -EINVAL; | 2882 | return -EINVAL; |
2740 | } | 2883 | } |
2741 | if (size > radeon_object_size(track->arrays[i].robj)) { | 2884 | if (size > radeon_bo_size(track->arrays[i].robj)) { |
2742 | DRM_ERROR("(PW %u) Vertex array %u need %lu dwords " | 2885 | dev_err(rdev->dev, "(PW %u) Vertex array %u " |
2743 | "have %lu dwords\n", prim_walk, i, | 2886 | "need %lu dwords have %lu dwords\n", |
2744 | size >> 2, | 2887 | prim_walk, i, size >> 2, |
2745 | radeon_object_size(track->arrays[i].robj) >> 2); | 2888 | radeon_bo_size(track->arrays[i].robj) |
2889 | >> 2); | ||
2746 | DRM_ERROR("Max indices %u\n", track->max_indx); | 2890 | DRM_ERROR("Max indices %u\n", track->max_indx); |
2747 | return -EINVAL; | 2891 | return -EINVAL; |
2748 | } | 2892 | } |
@@ -2756,10 +2900,12 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track) | |||
2756 | "bound\n", prim_walk, i); | 2900 | "bound\n", prim_walk, i); |
2757 | return -EINVAL; | 2901 | return -EINVAL; |
2758 | } | 2902 | } |
2759 | if (size > radeon_object_size(track->arrays[i].robj)) { | 2903 | if (size > radeon_bo_size(track->arrays[i].robj)) { |
2760 | DRM_ERROR("(PW %u) Vertex array %u need %lu dwords " | 2904 | dev_err(rdev->dev, "(PW %u) Vertex array %u " |
2761 | "have %lu dwords\n", prim_walk, i, size >> 2, | 2905 | "need %lu dwords have %lu dwords\n", |
2762 | radeon_object_size(track->arrays[i].robj) >> 2); | 2906 | prim_walk, i, size >> 2, |
2907 | radeon_bo_size(track->arrays[i].robj) | ||
2908 | >> 2); | ||
2763 | return -EINVAL; | 2909 | return -EINVAL; |
2764 | } | 2910 | } |
2765 | } | 2911 | } |
@@ -3101,6 +3247,9 @@ static int r100_startup(struct radeon_device *rdev) | |||
3101 | { | 3247 | { |
3102 | int r; | 3248 | int r; |
3103 | 3249 | ||
3250 | /* set common regs */ | ||
3251 | r100_set_common_regs(rdev); | ||
3252 | /* program mc */ | ||
3104 | r100_mc_program(rdev); | 3253 | r100_mc_program(rdev); |
3105 | /* Resume clock */ | 3254 | /* Resume clock */ |
3106 | r100_clock_startup(rdev); | 3255 | r100_clock_startup(rdev); |
@@ -3108,13 +3257,13 @@ static int r100_startup(struct radeon_device *rdev) | |||
3108 | r100_gpu_init(rdev); | 3257 | r100_gpu_init(rdev); |
3109 | /* Initialize GART (initialize after TTM so we can allocate | 3258 | /* Initialize GART (initialize after TTM so we can allocate |
3110 | * memory through TTM but finalize after TTM) */ | 3259 | * memory through TTM but finalize after TTM) */ |
3260 | r100_enable_bm(rdev); | ||
3111 | if (rdev->flags & RADEON_IS_PCI) { | 3261 | if (rdev->flags & RADEON_IS_PCI) { |
3112 | r = r100_pci_gart_enable(rdev); | 3262 | r = r100_pci_gart_enable(rdev); |
3113 | if (r) | 3263 | if (r) |
3114 | return r; | 3264 | return r; |
3115 | } | 3265 | } |
3116 | /* Enable IRQ */ | 3266 | /* Enable IRQ */ |
3117 | rdev->irq.sw_int = true; | ||
3118 | r100_irq_set(rdev); | 3267 | r100_irq_set(rdev); |
3119 | /* 1M ring buffer */ | 3268 | /* 1M ring buffer */ |
3120 | r = r100_cp_init(rdev, 1024 * 1024); | 3269 | r = r100_cp_init(rdev, 1024 * 1024); |
@@ -3174,7 +3323,7 @@ void r100_fini(struct radeon_device *rdev) | |||
3174 | r100_pci_gart_fini(rdev); | 3323 | r100_pci_gart_fini(rdev); |
3175 | radeon_irq_kms_fini(rdev); | 3324 | radeon_irq_kms_fini(rdev); |
3176 | radeon_fence_driver_fini(rdev); | 3325 | radeon_fence_driver_fini(rdev); |
3177 | radeon_object_fini(rdev); | 3326 | radeon_bo_fini(rdev); |
3178 | radeon_atombios_fini(rdev); | 3327 | radeon_atombios_fini(rdev); |
3179 | kfree(rdev->bios); | 3328 | kfree(rdev->bios); |
3180 | rdev->bios = NULL; | 3329 | rdev->bios = NULL; |
@@ -3242,10 +3391,8 @@ int r100_init(struct radeon_device *rdev) | |||
3242 | RREG32(R_0007C0_CP_STAT)); | 3391 | RREG32(R_0007C0_CP_STAT)); |
3243 | } | 3392 | } |
3244 | /* check if cards are posted or not */ | 3393 | /* check if cards are posted or not */ |
3245 | if (!radeon_card_posted(rdev) && rdev->bios) { | 3394 | if (radeon_boot_test_post_card(rdev) == false) |
3246 | DRM_INFO("GPU not posted. posting now...\n"); | 3395 | return -EINVAL; |
3247 | radeon_combios_asic_init(rdev->ddev); | ||
3248 | } | ||
3249 | /* Set asic errata */ | 3396 | /* Set asic errata */ |
3250 | r100_errata(rdev); | 3397 | r100_errata(rdev); |
3251 | /* Initialize clocks */ | 3398 | /* Initialize clocks */ |
@@ -3264,7 +3411,7 @@ int r100_init(struct radeon_device *rdev) | |||
3264 | if (r) | 3411 | if (r) |
3265 | return r; | 3412 | return r; |
3266 | /* Memory manager */ | 3413 | /* Memory manager */ |
3267 | r = radeon_object_init(rdev); | 3414 | r = radeon_bo_init(rdev); |
3268 | if (r) | 3415 | if (r) |
3269 | return r; | 3416 | return r; |
3270 | if (rdev->flags & RADEON_IS_PCI) { | 3417 | if (rdev->flags & RADEON_IS_PCI) { |
diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h index 0daf0d76a891..ca50903dd2bb 100644 --- a/drivers/gpu/drm/radeon/r100_track.h +++ b/drivers/gpu/drm/radeon/r100_track.h | |||
@@ -10,26 +10,26 @@ | |||
10 | * CS functions | 10 | * CS functions |
11 | */ | 11 | */ |
12 | struct r100_cs_track_cb { | 12 | struct r100_cs_track_cb { |
13 | struct radeon_object *robj; | 13 | struct radeon_bo *robj; |
14 | unsigned pitch; | 14 | unsigned pitch; |
15 | unsigned cpp; | 15 | unsigned cpp; |
16 | unsigned offset; | 16 | unsigned offset; |
17 | }; | 17 | }; |
18 | 18 | ||
19 | struct r100_cs_track_array { | 19 | struct r100_cs_track_array { |
20 | struct radeon_object *robj; | 20 | struct radeon_bo *robj; |
21 | unsigned esize; | 21 | unsigned esize; |
22 | }; | 22 | }; |
23 | 23 | ||
24 | struct r100_cs_cube_info { | 24 | struct r100_cs_cube_info { |
25 | struct radeon_object *robj; | 25 | struct radeon_bo *robj; |
26 | unsigned offset; | 26 | unsigned offset; |
27 | unsigned width; | 27 | unsigned width; |
28 | unsigned height; | 28 | unsigned height; |
29 | }; | 29 | }; |
30 | 30 | ||
31 | struct r100_cs_track_texture { | 31 | struct r100_cs_track_texture { |
32 | struct radeon_object *robj; | 32 | struct radeon_bo *robj; |
33 | struct r100_cs_cube_info cube_info[5]; /* info for 5 non-primary faces */ | 33 | struct r100_cs_cube_info cube_info[5]; /* info for 5 non-primary faces */ |
34 | unsigned pitch; | 34 | unsigned pitch; |
35 | unsigned width; | 35 | unsigned width; |
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 2f43ee8e4048..86065dcc1982 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c | |||
@@ -137,14 +137,19 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev) | |||
137 | 137 | ||
138 | void rv370_pcie_gart_disable(struct radeon_device *rdev) | 138 | void rv370_pcie_gart_disable(struct radeon_device *rdev) |
139 | { | 139 | { |
140 | uint32_t tmp; | 140 | u32 tmp; |
141 | int r; | ||
141 | 142 | ||
142 | tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL); | 143 | tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL); |
143 | tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD; | 144 | tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD; |
144 | WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN); | 145 | WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN); |
145 | if (rdev->gart.table.vram.robj) { | 146 | if (rdev->gart.table.vram.robj) { |
146 | radeon_object_kunmap(rdev->gart.table.vram.robj); | 147 | r = radeon_bo_reserve(rdev->gart.table.vram.robj, false); |
147 | radeon_object_unpin(rdev->gart.table.vram.robj); | 148 | if (likely(r == 0)) { |
149 | radeon_bo_kunmap(rdev->gart.table.vram.robj); | ||
150 | radeon_bo_unpin(rdev->gart.table.vram.robj); | ||
151 | radeon_bo_unreserve(rdev->gart.table.vram.robj); | ||
152 | } | ||
148 | } | 153 | } |
149 | } | 154 | } |
150 | 155 | ||
@@ -1181,6 +1186,9 @@ static int r300_startup(struct radeon_device *rdev) | |||
1181 | { | 1186 | { |
1182 | int r; | 1187 | int r; |
1183 | 1188 | ||
1189 | /* set common regs */ | ||
1190 | r100_set_common_regs(rdev); | ||
1191 | /* program mc */ | ||
1184 | r300_mc_program(rdev); | 1192 | r300_mc_program(rdev); |
1185 | /* Resume clock */ | 1193 | /* Resume clock */ |
1186 | r300_clock_startup(rdev); | 1194 | r300_clock_startup(rdev); |
@@ -1193,13 +1201,18 @@ static int r300_startup(struct radeon_device *rdev) | |||
1193 | if (r) | 1201 | if (r) |
1194 | return r; | 1202 | return r; |
1195 | } | 1203 | } |
1204 | |||
1205 | if (rdev->family == CHIP_R300 || | ||
1206 | rdev->family == CHIP_R350 || | ||
1207 | rdev->family == CHIP_RV350) | ||
1208 | r100_enable_bm(rdev); | ||
1209 | |||
1196 | if (rdev->flags & RADEON_IS_PCI) { | 1210 | if (rdev->flags & RADEON_IS_PCI) { |
1197 | r = r100_pci_gart_enable(rdev); | 1211 | r = r100_pci_gart_enable(rdev); |
1198 | if (r) | 1212 | if (r) |
1199 | return r; | 1213 | return r; |
1200 | } | 1214 | } |
1201 | /* Enable IRQ */ | 1215 | /* Enable IRQ */ |
1202 | rdev->irq.sw_int = true; | ||
1203 | r100_irq_set(rdev); | 1216 | r100_irq_set(rdev); |
1204 | /* 1M ring buffer */ | 1217 | /* 1M ring buffer */ |
1205 | r = r100_cp_init(rdev, 1024 * 1024); | 1218 | r = r100_cp_init(rdev, 1024 * 1024); |
@@ -1265,7 +1278,7 @@ void r300_fini(struct radeon_device *rdev) | |||
1265 | r100_pci_gart_fini(rdev); | 1278 | r100_pci_gart_fini(rdev); |
1266 | radeon_irq_kms_fini(rdev); | 1279 | radeon_irq_kms_fini(rdev); |
1267 | radeon_fence_driver_fini(rdev); | 1280 | radeon_fence_driver_fini(rdev); |
1268 | radeon_object_fini(rdev); | 1281 | radeon_bo_fini(rdev); |
1269 | radeon_atombios_fini(rdev); | 1282 | radeon_atombios_fini(rdev); |
1270 | kfree(rdev->bios); | 1283 | kfree(rdev->bios); |
1271 | rdev->bios = NULL; | 1284 | rdev->bios = NULL; |
@@ -1303,10 +1316,8 @@ int r300_init(struct radeon_device *rdev) | |||
1303 | RREG32(R_0007C0_CP_STAT)); | 1316 | RREG32(R_0007C0_CP_STAT)); |
1304 | } | 1317 | } |
1305 | /* check if cards are posted or not */ | 1318 | /* check if cards are posted or not */ |
1306 | if (!radeon_card_posted(rdev) && rdev->bios) { | 1319 | if (radeon_boot_test_post_card(rdev) == false) |
1307 | DRM_INFO("GPU not posted. posting now...\n"); | 1320 | return -EINVAL; |
1308 | radeon_combios_asic_init(rdev->ddev); | ||
1309 | } | ||
1310 | /* Set asic errata */ | 1321 | /* Set asic errata */ |
1311 | r300_errata(rdev); | 1322 | r300_errata(rdev); |
1312 | /* Initialize clocks */ | 1323 | /* Initialize clocks */ |
@@ -1325,7 +1336,7 @@ int r300_init(struct radeon_device *rdev) | |||
1325 | if (r) | 1336 | if (r) |
1326 | return r; | 1337 | return r; |
1327 | /* Memory manager */ | 1338 | /* Memory manager */ |
1328 | r = radeon_object_init(rdev); | 1339 | r = radeon_bo_init(rdev); |
1329 | if (r) | 1340 | if (r) |
1330 | return r; | 1341 | return r; |
1331 | if (rdev->flags & RADEON_IS_PCIE) { | 1342 | if (rdev->flags & RADEON_IS_PCIE) { |
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c index 1cefdbcc0850..162c3902fe69 100644 --- a/drivers/gpu/drm/radeon/r420.c +++ b/drivers/gpu/drm/radeon/r420.c | |||
@@ -169,6 +169,9 @@ static int r420_startup(struct radeon_device *rdev) | |||
169 | { | 169 | { |
170 | int r; | 170 | int r; |
171 | 171 | ||
172 | /* set common regs */ | ||
173 | r100_set_common_regs(rdev); | ||
174 | /* program mc */ | ||
172 | r300_mc_program(rdev); | 175 | r300_mc_program(rdev); |
173 | /* Resume clock */ | 176 | /* Resume clock */ |
174 | r420_clock_resume(rdev); | 177 | r420_clock_resume(rdev); |
@@ -186,7 +189,6 @@ static int r420_startup(struct radeon_device *rdev) | |||
186 | } | 189 | } |
187 | r420_pipes_init(rdev); | 190 | r420_pipes_init(rdev); |
188 | /* Enable IRQ */ | 191 | /* Enable IRQ */ |
189 | rdev->irq.sw_int = true; | ||
190 | r100_irq_set(rdev); | 192 | r100_irq_set(rdev); |
191 | /* 1M ring buffer */ | 193 | /* 1M ring buffer */ |
192 | r = r100_cp_init(rdev, 1024 * 1024); | 194 | r = r100_cp_init(rdev, 1024 * 1024); |
@@ -258,7 +260,7 @@ void r420_fini(struct radeon_device *rdev) | |||
258 | radeon_agp_fini(rdev); | 260 | radeon_agp_fini(rdev); |
259 | radeon_irq_kms_fini(rdev); | 261 | radeon_irq_kms_fini(rdev); |
260 | radeon_fence_driver_fini(rdev); | 262 | radeon_fence_driver_fini(rdev); |
261 | radeon_object_fini(rdev); | 263 | radeon_bo_fini(rdev); |
262 | if (rdev->is_atom_bios) { | 264 | if (rdev->is_atom_bios) { |
263 | radeon_atombios_fini(rdev); | 265 | radeon_atombios_fini(rdev); |
264 | } else { | 266 | } else { |
@@ -301,14 +303,9 @@ int r420_init(struct radeon_device *rdev) | |||
301 | RREG32(R_0007C0_CP_STAT)); | 303 | RREG32(R_0007C0_CP_STAT)); |
302 | } | 304 | } |
303 | /* check if cards are posted or not */ | 305 | /* check if cards are posted or not */ |
304 | if (!radeon_card_posted(rdev) && rdev->bios) { | 306 | if (radeon_boot_test_post_card(rdev) == false) |
305 | DRM_INFO("GPU not posted. posting now...\n"); | 307 | return -EINVAL; |
306 | if (rdev->is_atom_bios) { | 308 | |
307 | atom_asic_init(rdev->mode_info.atom_context); | ||
308 | } else { | ||
309 | radeon_combios_asic_init(rdev->ddev); | ||
310 | } | ||
311 | } | ||
312 | /* Initialize clocks */ | 309 | /* Initialize clocks */ |
313 | radeon_get_clock_info(rdev->ddev); | 310 | radeon_get_clock_info(rdev->ddev); |
314 | /* Initialize power management */ | 311 | /* Initialize power management */ |
@@ -331,10 +328,13 @@ int r420_init(struct radeon_device *rdev) | |||
331 | return r; | 328 | return r; |
332 | } | 329 | } |
333 | /* Memory manager */ | 330 | /* Memory manager */ |
334 | r = radeon_object_init(rdev); | 331 | r = radeon_bo_init(rdev); |
335 | if (r) { | 332 | if (r) { |
336 | return r; | 333 | return r; |
337 | } | 334 | } |
335 | if (rdev->family == CHIP_R420) | ||
336 | r100_enable_bm(rdev); | ||
337 | |||
338 | if (rdev->flags & RADEON_IS_PCIE) { | 338 | if (rdev->flags & RADEON_IS_PCIE) { |
339 | r = rv370_pcie_gart_init(rdev); | 339 | r = rv370_pcie_gart_init(rdev); |
340 | if (r) | 340 | if (r) |
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h index 7baa73955563..74ad89bdf2b5 100644 --- a/drivers/gpu/drm/radeon/r500_reg.h +++ b/drivers/gpu/drm/radeon/r500_reg.h | |||
@@ -716,6 +716,8 @@ | |||
716 | 716 | ||
717 | #define AVIVO_DVOA_BIT_DEPTH_CONTROL 0x7988 | 717 | #define AVIVO_DVOA_BIT_DEPTH_CONTROL 0x7988 |
718 | 718 | ||
719 | #define AVIVO_DC_GPIO_HPD_A 0x7e94 | ||
720 | |||
719 | #define AVIVO_GPIO_0 0x7e30 | 721 | #define AVIVO_GPIO_0 0x7e30 |
720 | #define AVIVO_GPIO_1 0x7e40 | 722 | #define AVIVO_GPIO_1 0x7e40 |
721 | #define AVIVO_GPIO_2 0x7e50 | 723 | #define AVIVO_GPIO_2 0x7e50 |
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c index f7435185c0a6..788eef5c2a08 100644 --- a/drivers/gpu/drm/radeon/r520.c +++ b/drivers/gpu/drm/radeon/r520.c | |||
@@ -185,7 +185,6 @@ static int r520_startup(struct radeon_device *rdev) | |||
185 | return r; | 185 | return r; |
186 | } | 186 | } |
187 | /* Enable IRQ */ | 187 | /* Enable IRQ */ |
188 | rdev->irq.sw_int = true; | ||
189 | rs600_irq_set(rdev); | 188 | rs600_irq_set(rdev); |
190 | /* 1M ring buffer */ | 189 | /* 1M ring buffer */ |
191 | r = r100_cp_init(rdev, 1024 * 1024); | 190 | r = r100_cp_init(rdev, 1024 * 1024); |
@@ -254,6 +253,9 @@ int r520_init(struct radeon_device *rdev) | |||
254 | RREG32(R_0007C0_CP_STAT)); | 253 | RREG32(R_0007C0_CP_STAT)); |
255 | } | 254 | } |
256 | /* check if cards are posted or not */ | 255 | /* check if cards are posted or not */ |
256 | if (radeon_boot_test_post_card(rdev) == false) | ||
257 | return -EINVAL; | ||
258 | |||
257 | if (!radeon_card_posted(rdev) && rdev->bios) { | 259 | if (!radeon_card_posted(rdev) && rdev->bios) { |
258 | DRM_INFO("GPU not posted. posting now...\n"); | 260 | DRM_INFO("GPU not posted. posting now...\n"); |
259 | atom_asic_init(rdev->mode_info.atom_context); | 261 | atom_asic_init(rdev->mode_info.atom_context); |
@@ -277,7 +279,7 @@ int r520_init(struct radeon_device *rdev) | |||
277 | if (r) | 279 | if (r) |
278 | return r; | 280 | return r; |
279 | /* Memory manager */ | 281 | /* Memory manager */ |
280 | r = radeon_object_init(rdev); | 282 | r = radeon_bo_init(rdev); |
281 | if (r) | 283 | if (r) |
282 | return r; | 284 | return r; |
283 | r = rv370_pcie_gart_init(rdev); | 285 | r = rv370_pcie_gart_init(rdev); |
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 278f646bc18e..250ec3fe1a16 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
@@ -38,8 +38,10 @@ | |||
38 | 38 | ||
39 | #define PFP_UCODE_SIZE 576 | 39 | #define PFP_UCODE_SIZE 576 |
40 | #define PM4_UCODE_SIZE 1792 | 40 | #define PM4_UCODE_SIZE 1792 |
41 | #define RLC_UCODE_SIZE 768 | ||
41 | #define R700_PFP_UCODE_SIZE 848 | 42 | #define R700_PFP_UCODE_SIZE 848 |
42 | #define R700_PM4_UCODE_SIZE 1360 | 43 | #define R700_PM4_UCODE_SIZE 1360 |
44 | #define R700_RLC_UCODE_SIZE 1024 | ||
43 | 45 | ||
44 | /* Firmware Names */ | 46 | /* Firmware Names */ |
45 | MODULE_FIRMWARE("radeon/R600_pfp.bin"); | 47 | MODULE_FIRMWARE("radeon/R600_pfp.bin"); |
@@ -62,6 +64,8 @@ MODULE_FIRMWARE("radeon/RV730_pfp.bin"); | |||
62 | MODULE_FIRMWARE("radeon/RV730_me.bin"); | 64 | MODULE_FIRMWARE("radeon/RV730_me.bin"); |
63 | MODULE_FIRMWARE("radeon/RV710_pfp.bin"); | 65 | MODULE_FIRMWARE("radeon/RV710_pfp.bin"); |
64 | MODULE_FIRMWARE("radeon/RV710_me.bin"); | 66 | MODULE_FIRMWARE("radeon/RV710_me.bin"); |
67 | MODULE_FIRMWARE("radeon/R600_rlc.bin"); | ||
68 | MODULE_FIRMWARE("radeon/R700_rlc.bin"); | ||
65 | 69 | ||
66 | int r600_debugfs_mc_info_init(struct radeon_device *rdev); | 70 | int r600_debugfs_mc_info_init(struct radeon_device *rdev); |
67 | 71 | ||
@@ -70,6 +74,281 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev); | |||
70 | void r600_gpu_init(struct radeon_device *rdev); | 74 | void r600_gpu_init(struct radeon_device *rdev); |
71 | void r600_fini(struct radeon_device *rdev); | 75 | void r600_fini(struct radeon_device *rdev); |
72 | 76 | ||
77 | /* hpd for digital panel detect/disconnect */ | ||
78 | bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd) | ||
79 | { | ||
80 | bool connected = false; | ||
81 | |||
82 | if (ASIC_IS_DCE3(rdev)) { | ||
83 | switch (hpd) { | ||
84 | case RADEON_HPD_1: | ||
85 | if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE) | ||
86 | connected = true; | ||
87 | break; | ||
88 | case RADEON_HPD_2: | ||
89 | if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE) | ||
90 | connected = true; | ||
91 | break; | ||
92 | case RADEON_HPD_3: | ||
93 | if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE) | ||
94 | connected = true; | ||
95 | break; | ||
96 | case RADEON_HPD_4: | ||
97 | if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE) | ||
98 | connected = true; | ||
99 | break; | ||
100 | /* DCE 3.2 */ | ||
101 | case RADEON_HPD_5: | ||
102 | if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE) | ||
103 | connected = true; | ||
104 | break; | ||
105 | case RADEON_HPD_6: | ||
106 | if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE) | ||
107 | connected = true; | ||
108 | break; | ||
109 | default: | ||
110 | break; | ||
111 | } | ||
112 | } else { | ||
113 | switch (hpd) { | ||
114 | case RADEON_HPD_1: | ||
115 | if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE) | ||
116 | connected = true; | ||
117 | break; | ||
118 | case RADEON_HPD_2: | ||
119 | if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE) | ||
120 | connected = true; | ||
121 | break; | ||
122 | case RADEON_HPD_3: | ||
123 | if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE) | ||
124 | connected = true; | ||
125 | break; | ||
126 | default: | ||
127 | break; | ||
128 | } | ||
129 | } | ||
130 | return connected; | ||
131 | } | ||
132 | |||
133 | void r600_hpd_set_polarity(struct radeon_device *rdev, | ||
134 | enum radeon_hpd_id hpd) | ||
135 | { | ||
136 | u32 tmp; | ||
137 | bool connected = r600_hpd_sense(rdev, hpd); | ||
138 | |||
139 | if (ASIC_IS_DCE3(rdev)) { | ||
140 | switch (hpd) { | ||
141 | case RADEON_HPD_1: | ||
142 | tmp = RREG32(DC_HPD1_INT_CONTROL); | ||
143 | if (connected) | ||
144 | tmp &= ~DC_HPDx_INT_POLARITY; | ||
145 | else | ||
146 | tmp |= DC_HPDx_INT_POLARITY; | ||
147 | WREG32(DC_HPD1_INT_CONTROL, tmp); | ||
148 | break; | ||
149 | case RADEON_HPD_2: | ||
150 | tmp = RREG32(DC_HPD2_INT_CONTROL); | ||
151 | if (connected) | ||
152 | tmp &= ~DC_HPDx_INT_POLARITY; | ||
153 | else | ||
154 | tmp |= DC_HPDx_INT_POLARITY; | ||
155 | WREG32(DC_HPD2_INT_CONTROL, tmp); | ||
156 | break; | ||
157 | case RADEON_HPD_3: | ||
158 | tmp = RREG32(DC_HPD3_INT_CONTROL); | ||
159 | if (connected) | ||
160 | tmp &= ~DC_HPDx_INT_POLARITY; | ||
161 | else | ||
162 | tmp |= DC_HPDx_INT_POLARITY; | ||
163 | WREG32(DC_HPD3_INT_CONTROL, tmp); | ||
164 | break; | ||
165 | case RADEON_HPD_4: | ||
166 | tmp = RREG32(DC_HPD4_INT_CONTROL); | ||
167 | if (connected) | ||
168 | tmp &= ~DC_HPDx_INT_POLARITY; | ||
169 | else | ||
170 | tmp |= DC_HPDx_INT_POLARITY; | ||
171 | WREG32(DC_HPD4_INT_CONTROL, tmp); | ||
172 | break; | ||
173 | case RADEON_HPD_5: | ||
174 | tmp = RREG32(DC_HPD5_INT_CONTROL); | ||
175 | if (connected) | ||
176 | tmp &= ~DC_HPDx_INT_POLARITY; | ||
177 | else | ||
178 | tmp |= DC_HPDx_INT_POLARITY; | ||
179 | WREG32(DC_HPD5_INT_CONTROL, tmp); | ||
180 | break; | ||
181 | /* DCE 3.2 */ | ||
182 | case RADEON_HPD_6: | ||
183 | tmp = RREG32(DC_HPD6_INT_CONTROL); | ||
184 | if (connected) | ||
185 | tmp &= ~DC_HPDx_INT_POLARITY; | ||
186 | else | ||
187 | tmp |= DC_HPDx_INT_POLARITY; | ||
188 | WREG32(DC_HPD6_INT_CONTROL, tmp); | ||
189 | break; | ||
190 | default: | ||
191 | break; | ||
192 | } | ||
193 | } else { | ||
194 | switch (hpd) { | ||
195 | case RADEON_HPD_1: | ||
196 | tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL); | ||
197 | if (connected) | ||
198 | tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY; | ||
199 | else | ||
200 | tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY; | ||
201 | WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp); | ||
202 | break; | ||
203 | case RADEON_HPD_2: | ||
204 | tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL); | ||
205 | if (connected) | ||
206 | tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY; | ||
207 | else | ||
208 | tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY; | ||
209 | WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp); | ||
210 | break; | ||
211 | case RADEON_HPD_3: | ||
212 | tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL); | ||
213 | if (connected) | ||
214 | tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY; | ||
215 | else | ||
216 | tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY; | ||
217 | WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp); | ||
218 | break; | ||
219 | default: | ||
220 | break; | ||
221 | } | ||
222 | } | ||
223 | } | ||
224 | |||
225 | void r600_hpd_init(struct radeon_device *rdev) | ||
226 | { | ||
227 | struct drm_device *dev = rdev->ddev; | ||
228 | struct drm_connector *connector; | ||
229 | |||
230 | if (ASIC_IS_DCE3(rdev)) { | ||
231 | u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa); | ||
232 | if (ASIC_IS_DCE32(rdev)) | ||
233 | tmp |= DC_HPDx_EN; | ||
234 | |||
235 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
236 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | ||
237 | switch (radeon_connector->hpd.hpd) { | ||
238 | case RADEON_HPD_1: | ||
239 | WREG32(DC_HPD1_CONTROL, tmp); | ||
240 | rdev->irq.hpd[0] = true; | ||
241 | break; | ||
242 | case RADEON_HPD_2: | ||
243 | WREG32(DC_HPD2_CONTROL, tmp); | ||
244 | rdev->irq.hpd[1] = true; | ||
245 | break; | ||
246 | case RADEON_HPD_3: | ||
247 | WREG32(DC_HPD3_CONTROL, tmp); | ||
248 | rdev->irq.hpd[2] = true; | ||
249 | break; | ||
250 | case RADEON_HPD_4: | ||
251 | WREG32(DC_HPD4_CONTROL, tmp); | ||
252 | rdev->irq.hpd[3] = true; | ||
253 | break; | ||
254 | /* DCE 3.2 */ | ||
255 | case RADEON_HPD_5: | ||
256 | WREG32(DC_HPD5_CONTROL, tmp); | ||
257 | rdev->irq.hpd[4] = true; | ||
258 | break; | ||
259 | case RADEON_HPD_6: | ||
260 | WREG32(DC_HPD6_CONTROL, tmp); | ||
261 | rdev->irq.hpd[5] = true; | ||
262 | break; | ||
263 | default: | ||
264 | break; | ||
265 | } | ||
266 | } | ||
267 | } else { | ||
268 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
269 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | ||
270 | switch (radeon_connector->hpd.hpd) { | ||
271 | case RADEON_HPD_1: | ||
272 | WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN); | ||
273 | rdev->irq.hpd[0] = true; | ||
274 | break; | ||
275 | case RADEON_HPD_2: | ||
276 | WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN); | ||
277 | rdev->irq.hpd[1] = true; | ||
278 | break; | ||
279 | case RADEON_HPD_3: | ||
280 | WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN); | ||
281 | rdev->irq.hpd[2] = true; | ||
282 | break; | ||
283 | default: | ||
284 | break; | ||
285 | } | ||
286 | } | ||
287 | } | ||
288 | r600_irq_set(rdev); | ||
289 | } | ||
290 | |||
291 | void r600_hpd_fini(struct radeon_device *rdev) | ||
292 | { | ||
293 | struct drm_device *dev = rdev->ddev; | ||
294 | struct drm_connector *connector; | ||
295 | |||
296 | if (ASIC_IS_DCE3(rdev)) { | ||
297 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
298 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | ||
299 | switch (radeon_connector->hpd.hpd) { | ||
300 | case RADEON_HPD_1: | ||
301 | WREG32(DC_HPD1_CONTROL, 0); | ||
302 | rdev->irq.hpd[0] = false; | ||
303 | break; | ||
304 | case RADEON_HPD_2: | ||
305 | WREG32(DC_HPD2_CONTROL, 0); | ||
306 | rdev->irq.hpd[1] = false; | ||
307 | break; | ||
308 | case RADEON_HPD_3: | ||
309 | WREG32(DC_HPD3_CONTROL, 0); | ||
310 | rdev->irq.hpd[2] = false; | ||
311 | break; | ||
312 | case RADEON_HPD_4: | ||
313 | WREG32(DC_HPD4_CONTROL, 0); | ||
314 | rdev->irq.hpd[3] = false; | ||
315 | break; | ||
316 | /* DCE 3.2 */ | ||
317 | case RADEON_HPD_5: | ||
318 | WREG32(DC_HPD5_CONTROL, 0); | ||
319 | rdev->irq.hpd[4] = false; | ||
320 | break; | ||
321 | case RADEON_HPD_6: | ||
322 | WREG32(DC_HPD6_CONTROL, 0); | ||
323 | rdev->irq.hpd[5] = false; | ||
324 | break; | ||
325 | default: | ||
326 | break; | ||
327 | } | ||
328 | } | ||
329 | } else { | ||
330 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
331 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | ||
332 | switch (radeon_connector->hpd.hpd) { | ||
333 | case RADEON_HPD_1: | ||
334 | WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0); | ||
335 | rdev->irq.hpd[0] = false; | ||
336 | break; | ||
337 | case RADEON_HPD_2: | ||
338 | WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0); | ||
339 | rdev->irq.hpd[1] = false; | ||
340 | break; | ||
341 | case RADEON_HPD_3: | ||
342 | WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0); | ||
343 | rdev->irq.hpd[2] = false; | ||
344 | break; | ||
345 | default: | ||
346 | break; | ||
347 | } | ||
348 | } | ||
349 | } | ||
350 | } | ||
351 | |||
73 | /* | 352 | /* |
74 | * R600 PCIE GART | 353 | * R600 PCIE GART |
75 | */ | 354 | */ |
@@ -180,7 +459,7 @@ int r600_pcie_gart_enable(struct radeon_device *rdev) | |||
180 | void r600_pcie_gart_disable(struct radeon_device *rdev) | 459 | void r600_pcie_gart_disable(struct radeon_device *rdev) |
181 | { | 460 | { |
182 | u32 tmp; | 461 | u32 tmp; |
183 | int i; | 462 | int i, r; |
184 | 463 | ||
185 | /* Disable all tables */ | 464 | /* Disable all tables */ |
186 | for (i = 0; i < 7; i++) | 465 | for (i = 0; i < 7; i++) |
@@ -208,8 +487,12 @@ void r600_pcie_gart_disable(struct radeon_device *rdev) | |||
208 | WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp); | 487 | WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp); |
209 | WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp); | 488 | WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp); |
210 | if (rdev->gart.table.vram.robj) { | 489 | if (rdev->gart.table.vram.robj) { |
211 | radeon_object_kunmap(rdev->gart.table.vram.robj); | 490 | r = radeon_bo_reserve(rdev->gart.table.vram.robj, false); |
212 | radeon_object_unpin(rdev->gart.table.vram.robj); | 491 | if (likely(r == 0)) { |
492 | radeon_bo_kunmap(rdev->gart.table.vram.robj); | ||
493 | radeon_bo_unpin(rdev->gart.table.vram.robj); | ||
494 | radeon_bo_unreserve(rdev->gart.table.vram.robj); | ||
495 | } | ||
213 | } | 496 | } |
214 | } | 497 | } |
215 | 498 | ||
@@ -1101,6 +1384,10 @@ void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v) | |||
1101 | (void)RREG32(PCIE_PORT_DATA); | 1384 | (void)RREG32(PCIE_PORT_DATA); |
1102 | } | 1385 | } |
1103 | 1386 | ||
1387 | void r600_hdp_flush(struct radeon_device *rdev) | ||
1388 | { | ||
1389 | WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1); | ||
1390 | } | ||
1104 | 1391 | ||
1105 | /* | 1392 | /* |
1106 | * CP & Ring | 1393 | * CP & Ring |
@@ -1110,11 +1397,12 @@ void r600_cp_stop(struct radeon_device *rdev) | |||
1110 | WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1)); | 1397 | WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1)); |
1111 | } | 1398 | } |
1112 | 1399 | ||
1113 | int r600_cp_init_microcode(struct radeon_device *rdev) | 1400 | int r600_init_microcode(struct radeon_device *rdev) |
1114 | { | 1401 | { |
1115 | struct platform_device *pdev; | 1402 | struct platform_device *pdev; |
1116 | const char *chip_name; | 1403 | const char *chip_name; |
1117 | size_t pfp_req_size, me_req_size; | 1404 | const char *rlc_chip_name; |
1405 | size_t pfp_req_size, me_req_size, rlc_req_size; | ||
1118 | char fw_name[30]; | 1406 | char fw_name[30]; |
1119 | int err; | 1407 | int err; |
1120 | 1408 | ||
@@ -1128,30 +1416,62 @@ int r600_cp_init_microcode(struct radeon_device *rdev) | |||
1128 | } | 1416 | } |
1129 | 1417 | ||
1130 | switch (rdev->family) { | 1418 | switch (rdev->family) { |
1131 | case CHIP_R600: chip_name = "R600"; break; | 1419 | case CHIP_R600: |
1132 | case CHIP_RV610: chip_name = "RV610"; break; | 1420 | chip_name = "R600"; |
1133 | case CHIP_RV630: chip_name = "RV630"; break; | 1421 | rlc_chip_name = "R600"; |
1134 | case CHIP_RV620: chip_name = "RV620"; break; | 1422 | break; |
1135 | case CHIP_RV635: chip_name = "RV635"; break; | 1423 | case CHIP_RV610: |
1136 | case CHIP_RV670: chip_name = "RV670"; break; | 1424 | chip_name = "RV610"; |
1425 | rlc_chip_name = "R600"; | ||
1426 | break; | ||
1427 | case CHIP_RV630: | ||
1428 | chip_name = "RV630"; | ||
1429 | rlc_chip_name = "R600"; | ||
1430 | break; | ||
1431 | case CHIP_RV620: | ||
1432 | chip_name = "RV620"; | ||
1433 | rlc_chip_name = "R600"; | ||
1434 | break; | ||
1435 | case CHIP_RV635: | ||
1436 | chip_name = "RV635"; | ||
1437 | rlc_chip_name = "R600"; | ||
1438 | break; | ||
1439 | case CHIP_RV670: | ||
1440 | chip_name = "RV670"; | ||
1441 | rlc_chip_name = "R600"; | ||
1442 | break; | ||
1137 | case CHIP_RS780: | 1443 | case CHIP_RS780: |
1138 | case CHIP_RS880: chip_name = "RS780"; break; | 1444 | case CHIP_RS880: |
1139 | case CHIP_RV770: chip_name = "RV770"; break; | 1445 | chip_name = "RS780"; |
1446 | rlc_chip_name = "R600"; | ||
1447 | break; | ||
1448 | case CHIP_RV770: | ||
1449 | chip_name = "RV770"; | ||
1450 | rlc_chip_name = "R700"; | ||
1451 | break; | ||
1140 | case CHIP_RV730: | 1452 | case CHIP_RV730: |
1141 | case CHIP_RV740: chip_name = "RV730"; break; | 1453 | case CHIP_RV740: |
1142 | case CHIP_RV710: chip_name = "RV710"; break; | 1454 | chip_name = "RV730"; |
1455 | rlc_chip_name = "R700"; | ||
1456 | break; | ||
1457 | case CHIP_RV710: | ||
1458 | chip_name = "RV710"; | ||
1459 | rlc_chip_name = "R700"; | ||
1460 | break; | ||
1143 | default: BUG(); | 1461 | default: BUG(); |
1144 | } | 1462 | } |
1145 | 1463 | ||
1146 | if (rdev->family >= CHIP_RV770) { | 1464 | if (rdev->family >= CHIP_RV770) { |
1147 | pfp_req_size = R700_PFP_UCODE_SIZE * 4; | 1465 | pfp_req_size = R700_PFP_UCODE_SIZE * 4; |
1148 | me_req_size = R700_PM4_UCODE_SIZE * 4; | 1466 | me_req_size = R700_PM4_UCODE_SIZE * 4; |
1467 | rlc_req_size = R700_RLC_UCODE_SIZE * 4; | ||
1149 | } else { | 1468 | } else { |
1150 | pfp_req_size = PFP_UCODE_SIZE * 4; | 1469 | pfp_req_size = PFP_UCODE_SIZE * 4; |
1151 | me_req_size = PM4_UCODE_SIZE * 12; | 1470 | me_req_size = PM4_UCODE_SIZE * 12; |
1471 | rlc_req_size = RLC_UCODE_SIZE * 4; | ||
1152 | } | 1472 | } |
1153 | 1473 | ||
1154 | DRM_INFO("Loading %s CP Microcode\n", chip_name); | 1474 | DRM_INFO("Loading %s Microcode\n", chip_name); |
1155 | 1475 | ||
1156 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name); | 1476 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name); |
1157 | err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev); | 1477 | err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev); |
@@ -1175,6 +1495,18 @@ int r600_cp_init_microcode(struct radeon_device *rdev) | |||
1175 | rdev->me_fw->size, fw_name); | 1495 | rdev->me_fw->size, fw_name); |
1176 | err = -EINVAL; | 1496 | err = -EINVAL; |
1177 | } | 1497 | } |
1498 | |||
1499 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name); | ||
1500 | err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev); | ||
1501 | if (err) | ||
1502 | goto out; | ||
1503 | if (rdev->rlc_fw->size != rlc_req_size) { | ||
1504 | printk(KERN_ERR | ||
1505 | "r600_rlc: Bogus length %zu in firmware \"%s\"\n", | ||
1506 | rdev->rlc_fw->size, fw_name); | ||
1507 | err = -EINVAL; | ||
1508 | } | ||
1509 | |||
1178 | out: | 1510 | out: |
1179 | platform_device_unregister(pdev); | 1511 | platform_device_unregister(pdev); |
1180 | 1512 | ||
@@ -1187,6 +1519,8 @@ out: | |||
1187 | rdev->pfp_fw = NULL; | 1519 | rdev->pfp_fw = NULL; |
1188 | release_firmware(rdev->me_fw); | 1520 | release_firmware(rdev->me_fw); |
1189 | rdev->me_fw = NULL; | 1521 | rdev->me_fw = NULL; |
1522 | release_firmware(rdev->rlc_fw); | ||
1523 | rdev->rlc_fw = NULL; | ||
1190 | } | 1524 | } |
1191 | return err; | 1525 | return err; |
1192 | } | 1526 | } |
@@ -1381,10 +1715,16 @@ int r600_ring_test(struct radeon_device *rdev) | |||
1381 | 1715 | ||
1382 | void r600_wb_disable(struct radeon_device *rdev) | 1716 | void r600_wb_disable(struct radeon_device *rdev) |
1383 | { | 1717 | { |
1718 | int r; | ||
1719 | |||
1384 | WREG32(SCRATCH_UMSK, 0); | 1720 | WREG32(SCRATCH_UMSK, 0); |
1385 | if (rdev->wb.wb_obj) { | 1721 | if (rdev->wb.wb_obj) { |
1386 | radeon_object_kunmap(rdev->wb.wb_obj); | 1722 | r = radeon_bo_reserve(rdev->wb.wb_obj, false); |
1387 | radeon_object_unpin(rdev->wb.wb_obj); | 1723 | if (unlikely(r != 0)) |
1724 | return; | ||
1725 | radeon_bo_kunmap(rdev->wb.wb_obj); | ||
1726 | radeon_bo_unpin(rdev->wb.wb_obj); | ||
1727 | radeon_bo_unreserve(rdev->wb.wb_obj); | ||
1388 | } | 1728 | } |
1389 | } | 1729 | } |
1390 | 1730 | ||
@@ -1392,7 +1732,7 @@ void r600_wb_fini(struct radeon_device *rdev) | |||
1392 | { | 1732 | { |
1393 | r600_wb_disable(rdev); | 1733 | r600_wb_disable(rdev); |
1394 | if (rdev->wb.wb_obj) { | 1734 | if (rdev->wb.wb_obj) { |
1395 | radeon_object_unref(&rdev->wb.wb_obj); | 1735 | radeon_bo_unref(&rdev->wb.wb_obj); |
1396 | rdev->wb.wb = NULL; | 1736 | rdev->wb.wb = NULL; |
1397 | rdev->wb.wb_obj = NULL; | 1737 | rdev->wb.wb_obj = NULL; |
1398 | } | 1738 | } |
@@ -1403,22 +1743,29 @@ int r600_wb_enable(struct radeon_device *rdev) | |||
1403 | int r; | 1743 | int r; |
1404 | 1744 | ||
1405 | if (rdev->wb.wb_obj == NULL) { | 1745 | if (rdev->wb.wb_obj == NULL) { |
1406 | r = radeon_object_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true, | 1746 | r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true, |
1407 | RADEON_GEM_DOMAIN_GTT, false, &rdev->wb.wb_obj); | 1747 | RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj); |
1408 | if (r) { | 1748 | if (r) { |
1409 | dev_warn(rdev->dev, "failed to create WB buffer (%d).\n", r); | 1749 | dev_warn(rdev->dev, "(%d) create WB bo failed\n", r); |
1410 | return r; | 1750 | return r; |
1411 | } | 1751 | } |
1412 | r = radeon_object_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT, | 1752 | r = radeon_bo_reserve(rdev->wb.wb_obj, false); |
1753 | if (unlikely(r != 0)) { | ||
1754 | r600_wb_fini(rdev); | ||
1755 | return r; | ||
1756 | } | ||
1757 | r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT, | ||
1413 | &rdev->wb.gpu_addr); | 1758 | &rdev->wb.gpu_addr); |
1414 | if (r) { | 1759 | if (r) { |
1415 | dev_warn(rdev->dev, "failed to pin WB buffer (%d).\n", r); | 1760 | radeon_bo_unreserve(rdev->wb.wb_obj); |
1761 | dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r); | ||
1416 | r600_wb_fini(rdev); | 1762 | r600_wb_fini(rdev); |
1417 | return r; | 1763 | return r; |
1418 | } | 1764 | } |
1419 | r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb); | 1765 | r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb); |
1766 | radeon_bo_unreserve(rdev->wb.wb_obj); | ||
1420 | if (r) { | 1767 | if (r) { |
1421 | dev_warn(rdev->dev, "failed to map WB buffer (%d).\n", r); | 1768 | dev_warn(rdev->dev, "(%d) map WB bo failed\n", r); |
1422 | r600_wb_fini(rdev); | 1769 | r600_wb_fini(rdev); |
1423 | return r; | 1770 | return r; |
1424 | } | 1771 | } |
@@ -1433,10 +1780,14 @@ int r600_wb_enable(struct radeon_device *rdev) | |||
1433 | void r600_fence_ring_emit(struct radeon_device *rdev, | 1780 | void r600_fence_ring_emit(struct radeon_device *rdev, |
1434 | struct radeon_fence *fence) | 1781 | struct radeon_fence *fence) |
1435 | { | 1782 | { |
1783 | /* Also consider EVENT_WRITE_EOP. it handles the interrupts + timestamps + events */ | ||
1436 | /* Emit fence sequence & fire IRQ */ | 1784 | /* Emit fence sequence & fire IRQ */ |
1437 | radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1)); | 1785 | radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1)); |
1438 | radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2)); | 1786 | radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2)); |
1439 | radeon_ring_write(rdev, fence->seq); | 1787 | radeon_ring_write(rdev, fence->seq); |
1788 | /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */ | ||
1789 | radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0)); | ||
1790 | radeon_ring_write(rdev, RB_INT_STAT); | ||
1440 | } | 1791 | } |
1441 | 1792 | ||
1442 | int r600_copy_dma(struct radeon_device *rdev, | 1793 | int r600_copy_dma(struct radeon_device *rdev, |
@@ -1459,18 +1810,6 @@ int r600_copy_blit(struct radeon_device *rdev, | |||
1459 | return 0; | 1810 | return 0; |
1460 | } | 1811 | } |
1461 | 1812 | ||
1462 | int r600_irq_process(struct radeon_device *rdev) | ||
1463 | { | ||
1464 | /* FIXME: implement */ | ||
1465 | return 0; | ||
1466 | } | ||
1467 | |||
1468 | int r600_irq_set(struct radeon_device *rdev) | ||
1469 | { | ||
1470 | /* FIXME: implement */ | ||
1471 | return 0; | ||
1472 | } | ||
1473 | |||
1474 | int r600_set_surface_reg(struct radeon_device *rdev, int reg, | 1813 | int r600_set_surface_reg(struct radeon_device *rdev, int reg, |
1475 | uint32_t tiling_flags, uint32_t pitch, | 1814 | uint32_t tiling_flags, uint32_t pitch, |
1476 | uint32_t offset, uint32_t obj_size) | 1815 | uint32_t offset, uint32_t obj_size) |
@@ -1516,13 +1855,26 @@ int r600_startup(struct radeon_device *rdev) | |||
1516 | } | 1855 | } |
1517 | r600_gpu_init(rdev); | 1856 | r600_gpu_init(rdev); |
1518 | 1857 | ||
1519 | r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, | 1858 | r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); |
1520 | &rdev->r600_blit.shader_gpu_addr); | 1859 | if (unlikely(r != 0)) |
1860 | return r; | ||
1861 | r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, | ||
1862 | &rdev->r600_blit.shader_gpu_addr); | ||
1863 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); | ||
1521 | if (r) { | 1864 | if (r) { |
1522 | DRM_ERROR("failed to pin blit object %d\n", r); | 1865 | dev_err(rdev->dev, "(%d) pin blit object failed\n", r); |
1523 | return r; | 1866 | return r; |
1524 | } | 1867 | } |
1525 | 1868 | ||
1869 | /* Enable IRQ */ | ||
1870 | r = r600_irq_init(rdev); | ||
1871 | if (r) { | ||
1872 | DRM_ERROR("radeon: IH init failed (%d).\n", r); | ||
1873 | radeon_irq_kms_fini(rdev); | ||
1874 | return r; | ||
1875 | } | ||
1876 | r600_irq_set(rdev); | ||
1877 | |||
1526 | r = radeon_ring_init(rdev, rdev->cp.ring_size); | 1878 | r = radeon_ring_init(rdev, rdev->cp.ring_size); |
1527 | if (r) | 1879 | if (r) |
1528 | return r; | 1880 | return r; |
@@ -1583,13 +1935,19 @@ int r600_resume(struct radeon_device *rdev) | |||
1583 | 1935 | ||
1584 | int r600_suspend(struct radeon_device *rdev) | 1936 | int r600_suspend(struct radeon_device *rdev) |
1585 | { | 1937 | { |
1938 | int r; | ||
1939 | |||
1586 | /* FIXME: we should wait for ring to be empty */ | 1940 | /* FIXME: we should wait for ring to be empty */ |
1587 | r600_cp_stop(rdev); | 1941 | r600_cp_stop(rdev); |
1588 | rdev->cp.ready = false; | 1942 | rdev->cp.ready = false; |
1589 | r600_wb_disable(rdev); | 1943 | r600_wb_disable(rdev); |
1590 | r600_pcie_gart_disable(rdev); | 1944 | r600_pcie_gart_disable(rdev); |
1591 | /* unpin shaders bo */ | 1945 | /* unpin shaders bo */ |
1592 | radeon_object_unpin(rdev->r600_blit.shader_obj); | 1946 | r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); |
1947 | if (unlikely(r != 0)) | ||
1948 | return r; | ||
1949 | radeon_bo_unpin(rdev->r600_blit.shader_obj); | ||
1950 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); | ||
1593 | return 0; | 1951 | return 0; |
1594 | } | 1952 | } |
1595 | 1953 | ||
@@ -1627,7 +1985,11 @@ int r600_init(struct radeon_device *rdev) | |||
1627 | if (r) | 1985 | if (r) |
1628 | return r; | 1986 | return r; |
1629 | /* Post card if necessary */ | 1987 | /* Post card if necessary */ |
1630 | if (!r600_card_posted(rdev) && rdev->bios) { | 1988 | if (!r600_card_posted(rdev)) { |
1989 | if (!rdev->bios) { | ||
1990 | dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n"); | ||
1991 | return -EINVAL; | ||
1992 | } | ||
1631 | DRM_INFO("GPU not posted. posting now...\n"); | 1993 | DRM_INFO("GPU not posted. posting now...\n"); |
1632 | atom_asic_init(rdev->mode_info.atom_context); | 1994 | atom_asic_init(rdev->mode_info.atom_context); |
1633 | } | 1995 | } |
@@ -1650,14 +2012,22 @@ int r600_init(struct radeon_device *rdev) | |||
1650 | if (r) | 2012 | if (r) |
1651 | return r; | 2013 | return r; |
1652 | /* Memory manager */ | 2014 | /* Memory manager */ |
1653 | r = radeon_object_init(rdev); | 2015 | r = radeon_bo_init(rdev); |
2016 | if (r) | ||
2017 | return r; | ||
2018 | |||
2019 | r = radeon_irq_kms_init(rdev); | ||
1654 | if (r) | 2020 | if (r) |
1655 | return r; | 2021 | return r; |
2022 | |||
1656 | rdev->cp.ring_obj = NULL; | 2023 | rdev->cp.ring_obj = NULL; |
1657 | r600_ring_init(rdev, 1024 * 1024); | 2024 | r600_ring_init(rdev, 1024 * 1024); |
1658 | 2025 | ||
1659 | if (!rdev->me_fw || !rdev->pfp_fw) { | 2026 | rdev->ih.ring_obj = NULL; |
1660 | r = r600_cp_init_microcode(rdev); | 2027 | r600_ih_ring_init(rdev, 64 * 1024); |
2028 | |||
2029 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { | ||
2030 | r = r600_init_microcode(rdev); | ||
1661 | if (r) { | 2031 | if (r) { |
1662 | DRM_ERROR("Failed to load firmware!\n"); | 2032 | DRM_ERROR("Failed to load firmware!\n"); |
1663 | return r; | 2033 | return r; |
@@ -1704,6 +2074,8 @@ void r600_fini(struct radeon_device *rdev) | |||
1704 | r600_suspend(rdev); | 2074 | r600_suspend(rdev); |
1705 | 2075 | ||
1706 | r600_blit_fini(rdev); | 2076 | r600_blit_fini(rdev); |
2077 | r600_irq_fini(rdev); | ||
2078 | radeon_irq_kms_fini(rdev); | ||
1707 | radeon_ring_fini(rdev); | 2079 | radeon_ring_fini(rdev); |
1708 | r600_wb_fini(rdev); | 2080 | r600_wb_fini(rdev); |
1709 | r600_pcie_gart_fini(rdev); | 2081 | r600_pcie_gart_fini(rdev); |
@@ -1712,7 +2084,7 @@ void r600_fini(struct radeon_device *rdev) | |||
1712 | radeon_clocks_fini(rdev); | 2084 | radeon_clocks_fini(rdev); |
1713 | if (rdev->flags & RADEON_IS_AGP) | 2085 | if (rdev->flags & RADEON_IS_AGP) |
1714 | radeon_agp_fini(rdev); | 2086 | radeon_agp_fini(rdev); |
1715 | radeon_object_fini(rdev); | 2087 | radeon_bo_fini(rdev); |
1716 | radeon_atombios_fini(rdev); | 2088 | radeon_atombios_fini(rdev); |
1717 | kfree(rdev->bios); | 2089 | kfree(rdev->bios); |
1718 | rdev->bios = NULL; | 2090 | rdev->bios = NULL; |
@@ -1798,8 +2170,657 @@ int r600_ib_test(struct radeon_device *rdev) | |||
1798 | return r; | 2170 | return r; |
1799 | } | 2171 | } |
1800 | 2172 | ||
2173 | /* | ||
2174 | * Interrupts | ||
2175 | * | ||
2176 | * Interrupts use a ring buffer on r6xx/r7xx hardware. It works pretty | ||
2177 | * the same as the CP ring buffer, but in reverse. Rather than the CPU | ||
2178 | * writing to the ring and the GPU consuming, the GPU writes to the ring | ||
2179 | * and host consumes. As the host irq handler processes interrupts, it | ||
2180 | * increments the rptr. When the rptr catches up with the wptr, all the | ||
2181 | * current interrupts have been processed. | ||
2182 | */ | ||
2183 | |||
2184 | void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size) | ||
2185 | { | ||
2186 | u32 rb_bufsz; | ||
2187 | |||
2188 | /* Align ring size */ | ||
2189 | rb_bufsz = drm_order(ring_size / 4); | ||
2190 | ring_size = (1 << rb_bufsz) * 4; | ||
2191 | rdev->ih.ring_size = ring_size; | ||
2192 | rdev->ih.align_mask = 4 - 1; | ||
2193 | } | ||
2194 | |||
2195 | static int r600_ih_ring_alloc(struct radeon_device *rdev, unsigned ring_size) | ||
2196 | { | ||
2197 | int r; | ||
2198 | |||
2199 | rdev->ih.ring_size = ring_size; | ||
2200 | /* Allocate ring buffer */ | ||
2201 | if (rdev->ih.ring_obj == NULL) { | ||
2202 | r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size, | ||
2203 | true, | ||
2204 | RADEON_GEM_DOMAIN_GTT, | ||
2205 | &rdev->ih.ring_obj); | ||
2206 | if (r) { | ||
2207 | DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r); | ||
2208 | return r; | ||
2209 | } | ||
2210 | r = radeon_bo_reserve(rdev->ih.ring_obj, false); | ||
2211 | if (unlikely(r != 0)) | ||
2212 | return r; | ||
2213 | r = radeon_bo_pin(rdev->ih.ring_obj, | ||
2214 | RADEON_GEM_DOMAIN_GTT, | ||
2215 | &rdev->ih.gpu_addr); | ||
2216 | if (r) { | ||
2217 | radeon_bo_unreserve(rdev->ih.ring_obj); | ||
2218 | DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r); | ||
2219 | return r; | ||
2220 | } | ||
2221 | r = radeon_bo_kmap(rdev->ih.ring_obj, | ||
2222 | (void **)&rdev->ih.ring); | ||
2223 | radeon_bo_unreserve(rdev->ih.ring_obj); | ||
2224 | if (r) { | ||
2225 | DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r); | ||
2226 | return r; | ||
2227 | } | ||
2228 | } | ||
2229 | rdev->ih.ptr_mask = (rdev->cp.ring_size / 4) - 1; | ||
2230 | rdev->ih.rptr = 0; | ||
2231 | |||
2232 | return 0; | ||
2233 | } | ||
2234 | |||
2235 | static void r600_ih_ring_fini(struct radeon_device *rdev) | ||
2236 | { | ||
2237 | int r; | ||
2238 | if (rdev->ih.ring_obj) { | ||
2239 | r = radeon_bo_reserve(rdev->ih.ring_obj, false); | ||
2240 | if (likely(r == 0)) { | ||
2241 | radeon_bo_kunmap(rdev->ih.ring_obj); | ||
2242 | radeon_bo_unpin(rdev->ih.ring_obj); | ||
2243 | radeon_bo_unreserve(rdev->ih.ring_obj); | ||
2244 | } | ||
2245 | radeon_bo_unref(&rdev->ih.ring_obj); | ||
2246 | rdev->ih.ring = NULL; | ||
2247 | rdev->ih.ring_obj = NULL; | ||
2248 | } | ||
2249 | } | ||
2250 | |||
2251 | static void r600_rlc_stop(struct radeon_device *rdev) | ||
2252 | { | ||
2253 | |||
2254 | if (rdev->family >= CHIP_RV770) { | ||
2255 | /* r7xx asics need to soft reset RLC before halting */ | ||
2256 | WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC); | ||
2257 | RREG32(SRBM_SOFT_RESET); | ||
2258 | udelay(15000); | ||
2259 | WREG32(SRBM_SOFT_RESET, 0); | ||
2260 | RREG32(SRBM_SOFT_RESET); | ||
2261 | } | ||
2262 | |||
2263 | WREG32(RLC_CNTL, 0); | ||
2264 | } | ||
2265 | |||
2266 | static void r600_rlc_start(struct radeon_device *rdev) | ||
2267 | { | ||
2268 | WREG32(RLC_CNTL, RLC_ENABLE); | ||
2269 | } | ||
2270 | |||
2271 | static int r600_rlc_init(struct radeon_device *rdev) | ||
2272 | { | ||
2273 | u32 i; | ||
2274 | const __be32 *fw_data; | ||
2275 | |||
2276 | if (!rdev->rlc_fw) | ||
2277 | return -EINVAL; | ||
2278 | |||
2279 | r600_rlc_stop(rdev); | ||
2280 | |||
2281 | WREG32(RLC_HB_BASE, 0); | ||
2282 | WREG32(RLC_HB_CNTL, 0); | ||
2283 | WREG32(RLC_HB_RPTR, 0); | ||
2284 | WREG32(RLC_HB_WPTR, 0); | ||
2285 | WREG32(RLC_HB_WPTR_LSB_ADDR, 0); | ||
2286 | WREG32(RLC_HB_WPTR_MSB_ADDR, 0); | ||
2287 | WREG32(RLC_MC_CNTL, 0); | ||
2288 | WREG32(RLC_UCODE_CNTL, 0); | ||
2289 | |||
2290 | fw_data = (const __be32 *)rdev->rlc_fw->data; | ||
2291 | if (rdev->family >= CHIP_RV770) { | ||
2292 | for (i = 0; i < R700_RLC_UCODE_SIZE; i++) { | ||
2293 | WREG32(RLC_UCODE_ADDR, i); | ||
2294 | WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++)); | ||
2295 | } | ||
2296 | } else { | ||
2297 | for (i = 0; i < RLC_UCODE_SIZE; i++) { | ||
2298 | WREG32(RLC_UCODE_ADDR, i); | ||
2299 | WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++)); | ||
2300 | } | ||
2301 | } | ||
2302 | WREG32(RLC_UCODE_ADDR, 0); | ||
2303 | |||
2304 | r600_rlc_start(rdev); | ||
2305 | |||
2306 | return 0; | ||
2307 | } | ||
2308 | |||
2309 | static void r600_enable_interrupts(struct radeon_device *rdev) | ||
2310 | { | ||
2311 | u32 ih_cntl = RREG32(IH_CNTL); | ||
2312 | u32 ih_rb_cntl = RREG32(IH_RB_CNTL); | ||
2313 | |||
2314 | ih_cntl |= ENABLE_INTR; | ||
2315 | ih_rb_cntl |= IH_RB_ENABLE; | ||
2316 | WREG32(IH_CNTL, ih_cntl); | ||
2317 | WREG32(IH_RB_CNTL, ih_rb_cntl); | ||
2318 | rdev->ih.enabled = true; | ||
2319 | } | ||
2320 | |||
2321 | static void r600_disable_interrupts(struct radeon_device *rdev) | ||
2322 | { | ||
2323 | u32 ih_rb_cntl = RREG32(IH_RB_CNTL); | ||
2324 | u32 ih_cntl = RREG32(IH_CNTL); | ||
2325 | |||
2326 | ih_rb_cntl &= ~IH_RB_ENABLE; | ||
2327 | ih_cntl &= ~ENABLE_INTR; | ||
2328 | WREG32(IH_RB_CNTL, ih_rb_cntl); | ||
2329 | WREG32(IH_CNTL, ih_cntl); | ||
2330 | /* set rptr, wptr to 0 */ | ||
2331 | WREG32(IH_RB_RPTR, 0); | ||
2332 | WREG32(IH_RB_WPTR, 0); | ||
2333 | rdev->ih.enabled = false; | ||
2334 | rdev->ih.wptr = 0; | ||
2335 | rdev->ih.rptr = 0; | ||
2336 | } | ||
2337 | |||
2338 | static void r600_disable_interrupt_state(struct radeon_device *rdev) | ||
2339 | { | ||
2340 | u32 tmp; | ||
2341 | |||
2342 | WREG32(CP_INT_CNTL, 0); | ||
2343 | WREG32(GRBM_INT_CNTL, 0); | ||
2344 | WREG32(DxMODE_INT_MASK, 0); | ||
2345 | if (ASIC_IS_DCE3(rdev)) { | ||
2346 | WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0); | ||
2347 | WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0); | ||
2348 | tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY; | ||
2349 | WREG32(DC_HPD1_INT_CONTROL, tmp); | ||
2350 | tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY; | ||
2351 | WREG32(DC_HPD2_INT_CONTROL, tmp); | ||
2352 | tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY; | ||
2353 | WREG32(DC_HPD3_INT_CONTROL, tmp); | ||
2354 | tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY; | ||
2355 | WREG32(DC_HPD4_INT_CONTROL, tmp); | ||
2356 | if (ASIC_IS_DCE32(rdev)) { | ||
2357 | tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY; | ||
2358 | WREG32(DC_HPD5_INT_CONTROL, 0); | ||
2359 | tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY; | ||
2360 | WREG32(DC_HPD6_INT_CONTROL, 0); | ||
2361 | } | ||
2362 | } else { | ||
2363 | WREG32(DACA_AUTODETECT_INT_CONTROL, 0); | ||
2364 | WREG32(DACB_AUTODETECT_INT_CONTROL, 0); | ||
2365 | tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY; | ||
2366 | WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, 0); | ||
2367 | tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY; | ||
2368 | WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, 0); | ||
2369 | tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY; | ||
2370 | WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, 0); | ||
2371 | } | ||
2372 | } | ||
2373 | |||
2374 | int r600_irq_init(struct radeon_device *rdev) | ||
2375 | { | ||
2376 | int ret = 0; | ||
2377 | int rb_bufsz; | ||
2378 | u32 interrupt_cntl, ih_cntl, ih_rb_cntl; | ||
2379 | |||
2380 | /* allocate ring */ | ||
2381 | ret = r600_ih_ring_alloc(rdev, rdev->ih.ring_size); | ||
2382 | if (ret) | ||
2383 | return ret; | ||
2384 | |||
2385 | /* disable irqs */ | ||
2386 | r600_disable_interrupts(rdev); | ||
2387 | |||
2388 | /* init rlc */ | ||
2389 | ret = r600_rlc_init(rdev); | ||
2390 | if (ret) { | ||
2391 | r600_ih_ring_fini(rdev); | ||
2392 | return ret; | ||
2393 | } | ||
2394 | |||
2395 | /* setup interrupt control */ | ||
2396 | /* set dummy read address to ring address */ | ||
2397 | WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8); | ||
2398 | interrupt_cntl = RREG32(INTERRUPT_CNTL); | ||
2399 | /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi | ||
2400 | * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN | ||
2401 | */ | ||
2402 | interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE; | ||
2403 | /* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */ | ||
2404 | interrupt_cntl &= ~IH_REQ_NONSNOOP_EN; | ||
2405 | WREG32(INTERRUPT_CNTL, interrupt_cntl); | ||
2406 | |||
2407 | WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8); | ||
2408 | rb_bufsz = drm_order(rdev->ih.ring_size / 4); | ||
2409 | |||
2410 | ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE | | ||
2411 | IH_WPTR_OVERFLOW_CLEAR | | ||
2412 | (rb_bufsz << 1)); | ||
2413 | /* WPTR writeback, not yet */ | ||
2414 | /*ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;*/ | ||
2415 | WREG32(IH_RB_WPTR_ADDR_LO, 0); | ||
2416 | WREG32(IH_RB_WPTR_ADDR_HI, 0); | ||
2417 | |||
2418 | WREG32(IH_RB_CNTL, ih_rb_cntl); | ||
2419 | |||
2420 | /* set rptr, wptr to 0 */ | ||
2421 | WREG32(IH_RB_RPTR, 0); | ||
2422 | WREG32(IH_RB_WPTR, 0); | ||
2423 | |||
2424 | /* Default settings for IH_CNTL (disabled at first) */ | ||
2425 | ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10); | ||
2426 | /* RPTR_REARM only works if msi's are enabled */ | ||
2427 | if (rdev->msi_enabled) | ||
2428 | ih_cntl |= RPTR_REARM; | ||
2429 | |||
2430 | #ifdef __BIG_ENDIAN | ||
2431 | ih_cntl |= IH_MC_SWAP(IH_MC_SWAP_32BIT); | ||
2432 | #endif | ||
2433 | WREG32(IH_CNTL, ih_cntl); | ||
2434 | |||
2435 | /* force the active interrupt state to all disabled */ | ||
2436 | r600_disable_interrupt_state(rdev); | ||
2437 | |||
2438 | /* enable irqs */ | ||
2439 | r600_enable_interrupts(rdev); | ||
2440 | |||
2441 | return ret; | ||
2442 | } | ||
2443 | |||
2444 | void r600_irq_fini(struct radeon_device *rdev) | ||
2445 | { | ||
2446 | r600_disable_interrupts(rdev); | ||
2447 | r600_rlc_stop(rdev); | ||
2448 | r600_ih_ring_fini(rdev); | ||
2449 | } | ||
2450 | |||
2451 | int r600_irq_set(struct radeon_device *rdev) | ||
2452 | { | ||
2453 | u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE; | ||
2454 | u32 mode_int = 0; | ||
2455 | u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0; | ||
2456 | |||
2457 | /* don't enable anything if the ih is disabled */ | ||
2458 | if (!rdev->ih.enabled) | ||
2459 | return 0; | ||
2460 | |||
2461 | if (ASIC_IS_DCE3(rdev)) { | ||
2462 | hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN; | ||
2463 | hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN; | ||
2464 | hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN; | ||
2465 | hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN; | ||
2466 | if (ASIC_IS_DCE32(rdev)) { | ||
2467 | hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN; | ||
2468 | hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN; | ||
2469 | } | ||
2470 | } else { | ||
2471 | hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN; | ||
2472 | hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN; | ||
2473 | hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN; | ||
2474 | } | ||
2475 | |||
2476 | if (rdev->irq.sw_int) { | ||
2477 | DRM_DEBUG("r600_irq_set: sw int\n"); | ||
2478 | cp_int_cntl |= RB_INT_ENABLE; | ||
2479 | } | ||
2480 | if (rdev->irq.crtc_vblank_int[0]) { | ||
2481 | DRM_DEBUG("r600_irq_set: vblank 0\n"); | ||
2482 | mode_int |= D1MODE_VBLANK_INT_MASK; | ||
2483 | } | ||
2484 | if (rdev->irq.crtc_vblank_int[1]) { | ||
2485 | DRM_DEBUG("r600_irq_set: vblank 1\n"); | ||
2486 | mode_int |= D2MODE_VBLANK_INT_MASK; | ||
2487 | } | ||
2488 | if (rdev->irq.hpd[0]) { | ||
2489 | DRM_DEBUG("r600_irq_set: hpd 1\n"); | ||
2490 | hpd1 |= DC_HPDx_INT_EN; | ||
2491 | } | ||
2492 | if (rdev->irq.hpd[1]) { | ||
2493 | DRM_DEBUG("r600_irq_set: hpd 2\n"); | ||
2494 | hpd2 |= DC_HPDx_INT_EN; | ||
2495 | } | ||
2496 | if (rdev->irq.hpd[2]) { | ||
2497 | DRM_DEBUG("r600_irq_set: hpd 3\n"); | ||
2498 | hpd3 |= DC_HPDx_INT_EN; | ||
2499 | } | ||
2500 | if (rdev->irq.hpd[3]) { | ||
2501 | DRM_DEBUG("r600_irq_set: hpd 4\n"); | ||
2502 | hpd4 |= DC_HPDx_INT_EN; | ||
2503 | } | ||
2504 | if (rdev->irq.hpd[4]) { | ||
2505 | DRM_DEBUG("r600_irq_set: hpd 5\n"); | ||
2506 | hpd5 |= DC_HPDx_INT_EN; | ||
2507 | } | ||
2508 | if (rdev->irq.hpd[5]) { | ||
2509 | DRM_DEBUG("r600_irq_set: hpd 6\n"); | ||
2510 | hpd6 |= DC_HPDx_INT_EN; | ||
2511 | } | ||
2512 | |||
2513 | WREG32(CP_INT_CNTL, cp_int_cntl); | ||
2514 | WREG32(DxMODE_INT_MASK, mode_int); | ||
2515 | if (ASIC_IS_DCE3(rdev)) { | ||
2516 | WREG32(DC_HPD1_INT_CONTROL, hpd1); | ||
2517 | WREG32(DC_HPD2_INT_CONTROL, hpd2); | ||
2518 | WREG32(DC_HPD3_INT_CONTROL, hpd3); | ||
2519 | WREG32(DC_HPD4_INT_CONTROL, hpd4); | ||
2520 | if (ASIC_IS_DCE32(rdev)) { | ||
2521 | WREG32(DC_HPD5_INT_CONTROL, hpd5); | ||
2522 | WREG32(DC_HPD6_INT_CONTROL, hpd6); | ||
2523 | } | ||
2524 | } else { | ||
2525 | WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1); | ||
2526 | WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2); | ||
2527 | WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3); | ||
2528 | } | ||
2529 | |||
2530 | return 0; | ||
2531 | } | ||
1801 | 2532 | ||
2533 | static inline void r600_irq_ack(struct radeon_device *rdev, | ||
2534 | u32 *disp_int, | ||
2535 | u32 *disp_int_cont, | ||
2536 | u32 *disp_int_cont2) | ||
2537 | { | ||
2538 | u32 tmp; | ||
1802 | 2539 | ||
2540 | if (ASIC_IS_DCE3(rdev)) { | ||
2541 | *disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS); | ||
2542 | *disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE); | ||
2543 | *disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2); | ||
2544 | } else { | ||
2545 | *disp_int = RREG32(DISP_INTERRUPT_STATUS); | ||
2546 | *disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE); | ||
2547 | *disp_int_cont2 = 0; | ||
2548 | } | ||
2549 | |||
2550 | if (*disp_int & LB_D1_VBLANK_INTERRUPT) | ||
2551 | WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK); | ||
2552 | if (*disp_int & LB_D1_VLINE_INTERRUPT) | ||
2553 | WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK); | ||
2554 | if (*disp_int & LB_D2_VBLANK_INTERRUPT) | ||
2555 | WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK); | ||
2556 | if (*disp_int & LB_D2_VLINE_INTERRUPT) | ||
2557 | WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK); | ||
2558 | if (*disp_int & DC_HPD1_INTERRUPT) { | ||
2559 | if (ASIC_IS_DCE3(rdev)) { | ||
2560 | tmp = RREG32(DC_HPD1_INT_CONTROL); | ||
2561 | tmp |= DC_HPDx_INT_ACK; | ||
2562 | WREG32(DC_HPD1_INT_CONTROL, tmp); | ||
2563 | } else { | ||
2564 | tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL); | ||
2565 | tmp |= DC_HPDx_INT_ACK; | ||
2566 | WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp); | ||
2567 | } | ||
2568 | } | ||
2569 | if (*disp_int & DC_HPD2_INTERRUPT) { | ||
2570 | if (ASIC_IS_DCE3(rdev)) { | ||
2571 | tmp = RREG32(DC_HPD2_INT_CONTROL); | ||
2572 | tmp |= DC_HPDx_INT_ACK; | ||
2573 | WREG32(DC_HPD2_INT_CONTROL, tmp); | ||
2574 | } else { | ||
2575 | tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL); | ||
2576 | tmp |= DC_HPDx_INT_ACK; | ||
2577 | WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp); | ||
2578 | } | ||
2579 | } | ||
2580 | if (*disp_int_cont & DC_HPD3_INTERRUPT) { | ||
2581 | if (ASIC_IS_DCE3(rdev)) { | ||
2582 | tmp = RREG32(DC_HPD3_INT_CONTROL); | ||
2583 | tmp |= DC_HPDx_INT_ACK; | ||
2584 | WREG32(DC_HPD3_INT_CONTROL, tmp); | ||
2585 | } else { | ||
2586 | tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL); | ||
2587 | tmp |= DC_HPDx_INT_ACK; | ||
2588 | WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp); | ||
2589 | } | ||
2590 | } | ||
2591 | if (*disp_int_cont & DC_HPD4_INTERRUPT) { | ||
2592 | tmp = RREG32(DC_HPD4_INT_CONTROL); | ||
2593 | tmp |= DC_HPDx_INT_ACK; | ||
2594 | WREG32(DC_HPD4_INT_CONTROL, tmp); | ||
2595 | } | ||
2596 | if (ASIC_IS_DCE32(rdev)) { | ||
2597 | if (*disp_int_cont2 & DC_HPD5_INTERRUPT) { | ||
2598 | tmp = RREG32(DC_HPD5_INT_CONTROL); | ||
2599 | tmp |= DC_HPDx_INT_ACK; | ||
2600 | WREG32(DC_HPD5_INT_CONTROL, tmp); | ||
2601 | } | ||
2602 | if (*disp_int_cont2 & DC_HPD6_INTERRUPT) { | ||
2603 | tmp = RREG32(DC_HPD5_INT_CONTROL); | ||
2604 | tmp |= DC_HPDx_INT_ACK; | ||
2605 | WREG32(DC_HPD6_INT_CONTROL, tmp); | ||
2606 | } | ||
2607 | } | ||
2608 | } | ||
2609 | |||
2610 | void r600_irq_disable(struct radeon_device *rdev) | ||
2611 | { | ||
2612 | u32 disp_int, disp_int_cont, disp_int_cont2; | ||
2613 | |||
2614 | r600_disable_interrupts(rdev); | ||
2615 | /* Wait and acknowledge irq */ | ||
2616 | mdelay(1); | ||
2617 | r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2); | ||
2618 | r600_disable_interrupt_state(rdev); | ||
2619 | } | ||
2620 | |||
2621 | static inline u32 r600_get_ih_wptr(struct radeon_device *rdev) | ||
2622 | { | ||
2623 | u32 wptr, tmp; | ||
2624 | |||
2625 | /* XXX use writeback */ | ||
2626 | wptr = RREG32(IH_RB_WPTR); | ||
2627 | |||
2628 | if (wptr & RB_OVERFLOW) { | ||
2629 | WARN_ON(1); | ||
2630 | /* XXX deal with overflow */ | ||
2631 | DRM_ERROR("IH RB overflow\n"); | ||
2632 | tmp = RREG32(IH_RB_CNTL); | ||
2633 | tmp |= IH_WPTR_OVERFLOW_CLEAR; | ||
2634 | WREG32(IH_RB_CNTL, tmp); | ||
2635 | } | ||
2636 | wptr = wptr & WPTR_OFFSET_MASK; | ||
2637 | |||
2638 | return wptr; | ||
2639 | } | ||
2640 | |||
2641 | /* r600 IV Ring | ||
2642 | * Each IV ring entry is 128 bits: | ||
2643 | * [7:0] - interrupt source id | ||
2644 | * [31:8] - reserved | ||
2645 | * [59:32] - interrupt source data | ||
2646 | * [127:60] - reserved | ||
2647 | * | ||
2648 | * The basic interrupt vector entries | ||
2649 | * are decoded as follows: | ||
2650 | * src_id src_data description | ||
2651 | * 1 0 D1 Vblank | ||
2652 | * 1 1 D1 Vline | ||
2653 | * 5 0 D2 Vblank | ||
2654 | * 5 1 D2 Vline | ||
2655 | * 19 0 FP Hot plug detection A | ||
2656 | * 19 1 FP Hot plug detection B | ||
2657 | * 19 2 DAC A auto-detection | ||
2658 | * 19 3 DAC B auto-detection | ||
2659 | * 176 - CP_INT RB | ||
2660 | * 177 - CP_INT IB1 | ||
2661 | * 178 - CP_INT IB2 | ||
2662 | * 181 - EOP Interrupt | ||
2663 | * 233 - GUI Idle | ||
2664 | * | ||
2665 | * Note, these are based on r600 and may need to be | ||
2666 | * adjusted or added to on newer asics | ||
2667 | */ | ||
2668 | |||
2669 | int r600_irq_process(struct radeon_device *rdev) | ||
2670 | { | ||
2671 | u32 wptr = r600_get_ih_wptr(rdev); | ||
2672 | u32 rptr = rdev->ih.rptr; | ||
2673 | u32 src_id, src_data; | ||
2674 | u32 last_entry = rdev->ih.ring_size - 16; | ||
2675 | u32 ring_index, disp_int, disp_int_cont, disp_int_cont2; | ||
2676 | unsigned long flags; | ||
2677 | bool queue_hotplug = false; | ||
2678 | |||
2679 | DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr); | ||
2680 | |||
2681 | spin_lock_irqsave(&rdev->ih.lock, flags); | ||
2682 | |||
2683 | if (rptr == wptr) { | ||
2684 | spin_unlock_irqrestore(&rdev->ih.lock, flags); | ||
2685 | return IRQ_NONE; | ||
2686 | } | ||
2687 | if (rdev->shutdown) { | ||
2688 | spin_unlock_irqrestore(&rdev->ih.lock, flags); | ||
2689 | return IRQ_NONE; | ||
2690 | } | ||
2691 | |||
2692 | restart_ih: | ||
2693 | /* display interrupts */ | ||
2694 | r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2); | ||
2695 | |||
2696 | rdev->ih.wptr = wptr; | ||
2697 | while (rptr != wptr) { | ||
2698 | /* wptr/rptr are in bytes! */ | ||
2699 | ring_index = rptr / 4; | ||
2700 | src_id = rdev->ih.ring[ring_index] & 0xff; | ||
2701 | src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff; | ||
2702 | |||
2703 | switch (src_id) { | ||
2704 | case 1: /* D1 vblank/vline */ | ||
2705 | switch (src_data) { | ||
2706 | case 0: /* D1 vblank */ | ||
2707 | if (disp_int & LB_D1_VBLANK_INTERRUPT) { | ||
2708 | drm_handle_vblank(rdev->ddev, 0); | ||
2709 | disp_int &= ~LB_D1_VBLANK_INTERRUPT; | ||
2710 | DRM_DEBUG("IH: D1 vblank\n"); | ||
2711 | } | ||
2712 | break; | ||
2713 | case 1: /* D1 vline */ | ||
2714 | if (disp_int & LB_D1_VLINE_INTERRUPT) { | ||
2715 | disp_int &= ~LB_D1_VLINE_INTERRUPT; | ||
2716 | DRM_DEBUG("IH: D1 vline\n"); | ||
2717 | } | ||
2718 | break; | ||
2719 | default: | ||
2720 | DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data); | ||
2721 | break; | ||
2722 | } | ||
2723 | break; | ||
2724 | case 5: /* D2 vblank/vline */ | ||
2725 | switch (src_data) { | ||
2726 | case 0: /* D2 vblank */ | ||
2727 | if (disp_int & LB_D2_VBLANK_INTERRUPT) { | ||
2728 | drm_handle_vblank(rdev->ddev, 1); | ||
2729 | disp_int &= ~LB_D2_VBLANK_INTERRUPT; | ||
2730 | DRM_DEBUG("IH: D2 vblank\n"); | ||
2731 | } | ||
2732 | break; | ||
2733 | case 1: /* D1 vline */ | ||
2734 | if (disp_int & LB_D2_VLINE_INTERRUPT) { | ||
2735 | disp_int &= ~LB_D2_VLINE_INTERRUPT; | ||
2736 | DRM_DEBUG("IH: D2 vline\n"); | ||
2737 | } | ||
2738 | break; | ||
2739 | default: | ||
2740 | DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data); | ||
2741 | break; | ||
2742 | } | ||
2743 | break; | ||
2744 | case 19: /* HPD/DAC hotplug */ | ||
2745 | switch (src_data) { | ||
2746 | case 0: | ||
2747 | if (disp_int & DC_HPD1_INTERRUPT) { | ||
2748 | disp_int &= ~DC_HPD1_INTERRUPT; | ||
2749 | queue_hotplug = true; | ||
2750 | DRM_DEBUG("IH: HPD1\n"); | ||
2751 | } | ||
2752 | break; | ||
2753 | case 1: | ||
2754 | if (disp_int & DC_HPD2_INTERRUPT) { | ||
2755 | disp_int &= ~DC_HPD2_INTERRUPT; | ||
2756 | queue_hotplug = true; | ||
2757 | DRM_DEBUG("IH: HPD2\n"); | ||
2758 | } | ||
2759 | break; | ||
2760 | case 4: | ||
2761 | if (disp_int_cont & DC_HPD3_INTERRUPT) { | ||
2762 | disp_int_cont &= ~DC_HPD3_INTERRUPT; | ||
2763 | queue_hotplug = true; | ||
2764 | DRM_DEBUG("IH: HPD3\n"); | ||
2765 | } | ||
2766 | break; | ||
2767 | case 5: | ||
2768 | if (disp_int_cont & DC_HPD4_INTERRUPT) { | ||
2769 | disp_int_cont &= ~DC_HPD4_INTERRUPT; | ||
2770 | queue_hotplug = true; | ||
2771 | DRM_DEBUG("IH: HPD4\n"); | ||
2772 | } | ||
2773 | break; | ||
2774 | case 10: | ||
2775 | if (disp_int_cont2 & DC_HPD5_INTERRUPT) { | ||
2776 | disp_int_cont &= ~DC_HPD5_INTERRUPT; | ||
2777 | queue_hotplug = true; | ||
2778 | DRM_DEBUG("IH: HPD5\n"); | ||
2779 | } | ||
2780 | break; | ||
2781 | case 12: | ||
2782 | if (disp_int_cont2 & DC_HPD6_INTERRUPT) { | ||
2783 | disp_int_cont &= ~DC_HPD6_INTERRUPT; | ||
2784 | queue_hotplug = true; | ||
2785 | DRM_DEBUG("IH: HPD6\n"); | ||
2786 | } | ||
2787 | break; | ||
2788 | default: | ||
2789 | DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data); | ||
2790 | break; | ||
2791 | } | ||
2792 | break; | ||
2793 | case 176: /* CP_INT in ring buffer */ | ||
2794 | case 177: /* CP_INT in IB1 */ | ||
2795 | case 178: /* CP_INT in IB2 */ | ||
2796 | DRM_DEBUG("IH: CP int: 0x%08x\n", src_data); | ||
2797 | radeon_fence_process(rdev); | ||
2798 | break; | ||
2799 | case 181: /* CP EOP event */ | ||
2800 | DRM_DEBUG("IH: CP EOP\n"); | ||
2801 | break; | ||
2802 | default: | ||
2803 | DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data); | ||
2804 | break; | ||
2805 | } | ||
2806 | |||
2807 | /* wptr/rptr are in bytes! */ | ||
2808 | if (rptr == last_entry) | ||
2809 | rptr = 0; | ||
2810 | else | ||
2811 | rptr += 16; | ||
2812 | } | ||
2813 | /* make sure wptr hasn't changed while processing */ | ||
2814 | wptr = r600_get_ih_wptr(rdev); | ||
2815 | if (wptr != rdev->ih.wptr) | ||
2816 | goto restart_ih; | ||
2817 | if (queue_hotplug) | ||
2818 | queue_work(rdev->wq, &rdev->hotplug_work); | ||
2819 | rdev->ih.rptr = rptr; | ||
2820 | WREG32(IH_RB_RPTR, rdev->ih.rptr); | ||
2821 | spin_unlock_irqrestore(&rdev->ih.lock, flags); | ||
2822 | return IRQ_HANDLED; | ||
2823 | } | ||
1803 | 2824 | ||
1804 | /* | 2825 | /* |
1805 | * Debugfs info | 2826 | * Debugfs info |
@@ -1811,21 +2832,21 @@ static int r600_debugfs_cp_ring_info(struct seq_file *m, void *data) | |||
1811 | struct drm_info_node *node = (struct drm_info_node *) m->private; | 2832 | struct drm_info_node *node = (struct drm_info_node *) m->private; |
1812 | struct drm_device *dev = node->minor->dev; | 2833 | struct drm_device *dev = node->minor->dev; |
1813 | struct radeon_device *rdev = dev->dev_private; | 2834 | struct radeon_device *rdev = dev->dev_private; |
1814 | uint32_t rdp, wdp; | ||
1815 | unsigned count, i, j; | 2835 | unsigned count, i, j; |
1816 | 2836 | ||
1817 | radeon_ring_free_size(rdev); | 2837 | radeon_ring_free_size(rdev); |
1818 | rdp = RREG32(CP_RB_RPTR); | 2838 | count = (rdev->cp.ring_size / 4) - rdev->cp.ring_free_dw; |
1819 | wdp = RREG32(CP_RB_WPTR); | ||
1820 | count = (rdp + rdev->cp.ring_size - wdp) & rdev->cp.ptr_mask; | ||
1821 | seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT)); | 2839 | seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT)); |
1822 | seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp); | 2840 | seq_printf(m, "CP_RB_WPTR 0x%08x\n", RREG32(CP_RB_WPTR)); |
1823 | seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp); | 2841 | seq_printf(m, "CP_RB_RPTR 0x%08x\n", RREG32(CP_RB_RPTR)); |
2842 | seq_printf(m, "driver's copy of the CP_RB_WPTR 0x%08x\n", rdev->cp.wptr); | ||
2843 | seq_printf(m, "driver's copy of the CP_RB_RPTR 0x%08x\n", rdev->cp.rptr); | ||
1824 | seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw); | 2844 | seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw); |
1825 | seq_printf(m, "%u dwords in ring\n", count); | 2845 | seq_printf(m, "%u dwords in ring\n", count); |
2846 | i = rdev->cp.rptr; | ||
1826 | for (j = 0; j <= count; j++) { | 2847 | for (j = 0; j <= count; j++) { |
1827 | i = (rdp + j) & rdev->cp.ptr_mask; | ||
1828 | seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]); | 2848 | seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]); |
2849 | i = (i + 1) & rdev->cp.ptr_mask; | ||
1829 | } | 2850 | } |
1830 | return 0; | 2851 | return 0; |
1831 | } | 2852 | } |
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c index dbf716e1fbf3..9aecafb51b66 100644 --- a/drivers/gpu/drm/radeon/r600_blit_kms.c +++ b/drivers/gpu/drm/radeon/r600_blit_kms.c | |||
@@ -473,9 +473,8 @@ int r600_blit_init(struct radeon_device *rdev) | |||
473 | obj_size += r6xx_ps_size * 4; | 473 | obj_size += r6xx_ps_size * 4; |
474 | obj_size = ALIGN(obj_size, 256); | 474 | obj_size = ALIGN(obj_size, 256); |
475 | 475 | ||
476 | r = radeon_object_create(rdev, NULL, obj_size, | 476 | r = radeon_bo_create(rdev, NULL, obj_size, true, RADEON_GEM_DOMAIN_VRAM, |
477 | true, RADEON_GEM_DOMAIN_VRAM, | 477 | &rdev->r600_blit.shader_obj); |
478 | false, &rdev->r600_blit.shader_obj); | ||
479 | if (r) { | 478 | if (r) { |
480 | DRM_ERROR("r600 failed to allocate shader\n"); | 479 | DRM_ERROR("r600 failed to allocate shader\n"); |
481 | return r; | 480 | return r; |
@@ -485,12 +484,14 @@ int r600_blit_init(struct radeon_device *rdev) | |||
485 | obj_size, | 484 | obj_size, |
486 | rdev->r600_blit.vs_offset, rdev->r600_blit.ps_offset); | 485 | rdev->r600_blit.vs_offset, rdev->r600_blit.ps_offset); |
487 | 486 | ||
488 | r = radeon_object_kmap(rdev->r600_blit.shader_obj, &ptr); | 487 | r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); |
488 | if (unlikely(r != 0)) | ||
489 | return r; | ||
490 | r = radeon_bo_kmap(rdev->r600_blit.shader_obj, &ptr); | ||
489 | if (r) { | 491 | if (r) { |
490 | DRM_ERROR("failed to map blit object %d\n", r); | 492 | DRM_ERROR("failed to map blit object %d\n", r); |
491 | return r; | 493 | return r; |
492 | } | 494 | } |
493 | |||
494 | if (rdev->family >= CHIP_RV770) | 495 | if (rdev->family >= CHIP_RV770) |
495 | memcpy_toio(ptr + rdev->r600_blit.state_offset, | 496 | memcpy_toio(ptr + rdev->r600_blit.state_offset, |
496 | r7xx_default_state, rdev->r600_blit.state_len * 4); | 497 | r7xx_default_state, rdev->r600_blit.state_len * 4); |
@@ -500,19 +501,26 @@ int r600_blit_init(struct radeon_device *rdev) | |||
500 | if (num_packet2s) | 501 | if (num_packet2s) |
501 | memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4), | 502 | memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4), |
502 | packet2s, num_packet2s * 4); | 503 | packet2s, num_packet2s * 4); |
503 | |||
504 | |||
505 | memcpy(ptr + rdev->r600_blit.vs_offset, r6xx_vs, r6xx_vs_size * 4); | 504 | memcpy(ptr + rdev->r600_blit.vs_offset, r6xx_vs, r6xx_vs_size * 4); |
506 | memcpy(ptr + rdev->r600_blit.ps_offset, r6xx_ps, r6xx_ps_size * 4); | 505 | memcpy(ptr + rdev->r600_blit.ps_offset, r6xx_ps, r6xx_ps_size * 4); |
507 | 506 | radeon_bo_kunmap(rdev->r600_blit.shader_obj); | |
508 | radeon_object_kunmap(rdev->r600_blit.shader_obj); | 507 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); |
509 | return 0; | 508 | return 0; |
510 | } | 509 | } |
511 | 510 | ||
512 | void r600_blit_fini(struct radeon_device *rdev) | 511 | void r600_blit_fini(struct radeon_device *rdev) |
513 | { | 512 | { |
514 | radeon_object_unpin(rdev->r600_blit.shader_obj); | 513 | int r; |
515 | radeon_object_unref(&rdev->r600_blit.shader_obj); | 514 | |
515 | r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); | ||
516 | if (unlikely(r != 0)) { | ||
517 | dev_err(rdev->dev, "(%d) can't finish r600 blit\n", r); | ||
518 | goto out_unref; | ||
519 | } | ||
520 | radeon_bo_unpin(rdev->r600_blit.shader_obj); | ||
521 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); | ||
522 | out_unref: | ||
523 | radeon_bo_unref(&rdev->r600_blit.shader_obj); | ||
516 | } | 524 | } |
517 | 525 | ||
518 | int r600_vb_ib_get(struct radeon_device *rdev) | 526 | int r600_vb_ib_get(struct radeon_device *rdev) |
@@ -569,9 +577,9 @@ int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes) | |||
569 | ring_size = num_loops * dwords_per_loop; | 577 | ring_size = num_loops * dwords_per_loop; |
570 | /* set default + shaders */ | 578 | /* set default + shaders */ |
571 | ring_size += 40; /* shaders + def state */ | 579 | ring_size += 40; /* shaders + def state */ |
572 | ring_size += 3; /* fence emit for VB IB */ | 580 | ring_size += 5; /* fence emit for VB IB */ |
573 | ring_size += 5; /* done copy */ | 581 | ring_size += 5; /* done copy */ |
574 | ring_size += 3; /* fence emit for done copy */ | 582 | ring_size += 5; /* fence emit for done copy */ |
575 | r = radeon_ring_lock(rdev, ring_size); | 583 | r = radeon_ring_lock(rdev, ring_size); |
576 | WARN_ON(r); | 584 | WARN_ON(r); |
577 | 585 | ||
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index 17e42195c632..0d820764f340 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c | |||
@@ -466,6 +466,23 @@ static int r600_packet3_check(struct radeon_cs_parser *p, | |||
466 | for (i = 0; i < pkt->count; i++) { | 466 | for (i = 0; i < pkt->count; i++) { |
467 | reg = start_reg + (4 * i); | 467 | reg = start_reg + (4 * i); |
468 | switch (reg) { | 468 | switch (reg) { |
469 | case SQ_ESGS_RING_BASE: | ||
470 | case SQ_GSVS_RING_BASE: | ||
471 | case SQ_ESTMP_RING_BASE: | ||
472 | case SQ_GSTMP_RING_BASE: | ||
473 | case SQ_VSTMP_RING_BASE: | ||
474 | case SQ_PSTMP_RING_BASE: | ||
475 | case SQ_FBUF_RING_BASE: | ||
476 | case SQ_REDUC_RING_BASE: | ||
477 | case SX_MEMORY_EXPORT_BASE: | ||
478 | r = r600_cs_packet_next_reloc(p, &reloc); | ||
479 | if (r) { | ||
480 | DRM_ERROR("bad SET_CONFIG_REG " | ||
481 | "0x%04X\n", reg); | ||
482 | return -EINVAL; | ||
483 | } | ||
484 | ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); | ||
485 | break; | ||
469 | case CP_COHER_BASE: | 486 | case CP_COHER_BASE: |
470 | /* use PACKET3_SURFACE_SYNC */ | 487 | /* use PACKET3_SURFACE_SYNC */ |
471 | return -EINVAL; | 488 | return -EINVAL; |
@@ -487,6 +504,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, | |||
487 | reg = start_reg + (4 * i); | 504 | reg = start_reg + (4 * i); |
488 | switch (reg) { | 505 | switch (reg) { |
489 | case DB_DEPTH_BASE: | 506 | case DB_DEPTH_BASE: |
507 | case DB_HTILE_DATA_BASE: | ||
490 | case CB_COLOR0_BASE: | 508 | case CB_COLOR0_BASE: |
491 | case CB_COLOR1_BASE: | 509 | case CB_COLOR1_BASE: |
492 | case CB_COLOR2_BASE: | 510 | case CB_COLOR2_BASE: |
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index 00d9642198a3..05894edadab4 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h | |||
@@ -119,6 +119,7 @@ | |||
119 | #define DB_DEBUG 0x9830 | 119 | #define DB_DEBUG 0x9830 |
120 | #define PREZ_MUST_WAIT_FOR_POSTZ_DONE (1 << 31) | 120 | #define PREZ_MUST_WAIT_FOR_POSTZ_DONE (1 << 31) |
121 | #define DB_DEPTH_BASE 0x2800C | 121 | #define DB_DEPTH_BASE 0x2800C |
122 | #define DB_HTILE_DATA_BASE 0x28014 | ||
122 | #define DB_WATERMARKS 0x9838 | 123 | #define DB_WATERMARKS 0x9838 |
123 | #define DEPTH_FREE(x) ((x) << 0) | 124 | #define DEPTH_FREE(x) ((x) << 0) |
124 | #define DEPTH_FLUSH(x) ((x) << 5) | 125 | #define DEPTH_FLUSH(x) ((x) << 5) |
@@ -171,6 +172,14 @@ | |||
171 | #define SQ_STACK_RESOURCE_MGMT_2 0x8c14 | 172 | #define SQ_STACK_RESOURCE_MGMT_2 0x8c14 |
172 | # define NUM_GS_STACK_ENTRIES(x) ((x) << 0) | 173 | # define NUM_GS_STACK_ENTRIES(x) ((x) << 0) |
173 | # define NUM_ES_STACK_ENTRIES(x) ((x) << 16) | 174 | # define NUM_ES_STACK_ENTRIES(x) ((x) << 16) |
175 | #define SQ_ESGS_RING_BASE 0x8c40 | ||
176 | #define SQ_GSVS_RING_BASE 0x8c48 | ||
177 | #define SQ_ESTMP_RING_BASE 0x8c50 | ||
178 | #define SQ_GSTMP_RING_BASE 0x8c58 | ||
179 | #define SQ_VSTMP_RING_BASE 0x8c60 | ||
180 | #define SQ_PSTMP_RING_BASE 0x8c68 | ||
181 | #define SQ_FBUF_RING_BASE 0x8c70 | ||
182 | #define SQ_REDUC_RING_BASE 0x8c78 | ||
174 | 183 | ||
175 | #define GRBM_CNTL 0x8000 | 184 | #define GRBM_CNTL 0x8000 |
176 | # define GRBM_READ_TIMEOUT(x) ((x) << 0) | 185 | # define GRBM_READ_TIMEOUT(x) ((x) << 0) |
@@ -356,6 +365,7 @@ | |||
356 | 365 | ||
357 | 366 | ||
358 | #define SX_MISC 0x28350 | 367 | #define SX_MISC 0x28350 |
368 | #define SX_MEMORY_EXPORT_BASE 0x9010 | ||
359 | #define SX_DEBUG_1 0x9054 | 369 | #define SX_DEBUG_1 0x9054 |
360 | #define SMX_EVENT_RELEASE (1 << 0) | 370 | #define SMX_EVENT_RELEASE (1 << 0) |
361 | #define ENABLE_NEW_SMX_ADDRESS (1 << 16) | 371 | #define ENABLE_NEW_SMX_ADDRESS (1 << 16) |
@@ -446,7 +456,215 @@ | |||
446 | #define WAIT_2D_IDLECLEAN_bit (1 << 16) | 456 | #define WAIT_2D_IDLECLEAN_bit (1 << 16) |
447 | #define WAIT_3D_IDLECLEAN_bit (1 << 17) | 457 | #define WAIT_3D_IDLECLEAN_bit (1 << 17) |
448 | 458 | ||
449 | 459 | #define IH_RB_CNTL 0x3e00 | |
460 | # define IH_RB_ENABLE (1 << 0) | ||
461 | # define IH_IB_SIZE(x) ((x) << 1) /* log2 */ | ||
462 | # define IH_RB_FULL_DRAIN_ENABLE (1 << 6) | ||
463 | # define IH_WPTR_WRITEBACK_ENABLE (1 << 8) | ||
464 | # define IH_WPTR_WRITEBACK_TIMER(x) ((x) << 9) /* log2 */ | ||
465 | # define IH_WPTR_OVERFLOW_ENABLE (1 << 16) | ||
466 | # define IH_WPTR_OVERFLOW_CLEAR (1 << 31) | ||
467 | #define IH_RB_BASE 0x3e04 | ||
468 | #define IH_RB_RPTR 0x3e08 | ||
469 | #define IH_RB_WPTR 0x3e0c | ||
470 | # define RB_OVERFLOW (1 << 0) | ||
471 | # define WPTR_OFFSET_MASK 0x3fffc | ||
472 | #define IH_RB_WPTR_ADDR_HI 0x3e10 | ||
473 | #define IH_RB_WPTR_ADDR_LO 0x3e14 | ||
474 | #define IH_CNTL 0x3e18 | ||
475 | # define ENABLE_INTR (1 << 0) | ||
476 | # define IH_MC_SWAP(x) ((x) << 2) | ||
477 | # define IH_MC_SWAP_NONE 0 | ||
478 | # define IH_MC_SWAP_16BIT 1 | ||
479 | # define IH_MC_SWAP_32BIT 2 | ||
480 | # define IH_MC_SWAP_64BIT 3 | ||
481 | # define RPTR_REARM (1 << 4) | ||
482 | # define MC_WRREQ_CREDIT(x) ((x) << 15) | ||
483 | # define MC_WR_CLEAN_CNT(x) ((x) << 20) | ||
484 | |||
485 | #define RLC_CNTL 0x3f00 | ||
486 | # define RLC_ENABLE (1 << 0) | ||
487 | #define RLC_HB_BASE 0x3f10 | ||
488 | #define RLC_HB_CNTL 0x3f0c | ||
489 | #define RLC_HB_RPTR 0x3f20 | ||
490 | #define RLC_HB_WPTR 0x3f1c | ||
491 | #define RLC_HB_WPTR_LSB_ADDR 0x3f14 | ||
492 | #define RLC_HB_WPTR_MSB_ADDR 0x3f18 | ||
493 | #define RLC_MC_CNTL 0x3f44 | ||
494 | #define RLC_UCODE_CNTL 0x3f48 | ||
495 | #define RLC_UCODE_ADDR 0x3f2c | ||
496 | #define RLC_UCODE_DATA 0x3f30 | ||
497 | |||
498 | #define SRBM_SOFT_RESET 0xe60 | ||
499 | # define SOFT_RESET_RLC (1 << 13) | ||
500 | |||
501 | #define CP_INT_CNTL 0xc124 | ||
502 | # define CNTX_BUSY_INT_ENABLE (1 << 19) | ||
503 | # define CNTX_EMPTY_INT_ENABLE (1 << 20) | ||
504 | # define SCRATCH_INT_ENABLE (1 << 25) | ||
505 | # define TIME_STAMP_INT_ENABLE (1 << 26) | ||
506 | # define IB2_INT_ENABLE (1 << 29) | ||
507 | # define IB1_INT_ENABLE (1 << 30) | ||
508 | # define RB_INT_ENABLE (1 << 31) | ||
509 | #define CP_INT_STATUS 0xc128 | ||
510 | # define SCRATCH_INT_STAT (1 << 25) | ||
511 | # define TIME_STAMP_INT_STAT (1 << 26) | ||
512 | # define IB2_INT_STAT (1 << 29) | ||
513 | # define IB1_INT_STAT (1 << 30) | ||
514 | # define RB_INT_STAT (1 << 31) | ||
515 | |||
516 | #define GRBM_INT_CNTL 0x8060 | ||
517 | # define RDERR_INT_ENABLE (1 << 0) | ||
518 | # define WAIT_COUNT_TIMEOUT_INT_ENABLE (1 << 1) | ||
519 | # define GUI_IDLE_INT_ENABLE (1 << 19) | ||
520 | |||
521 | #define INTERRUPT_CNTL 0x5468 | ||
522 | # define IH_DUMMY_RD_OVERRIDE (1 << 0) | ||
523 | # define IH_DUMMY_RD_EN (1 << 1) | ||
524 | # define IH_REQ_NONSNOOP_EN (1 << 3) | ||
525 | # define GEN_IH_INT_EN (1 << 8) | ||
526 | #define INTERRUPT_CNTL2 0x546c | ||
527 | |||
528 | #define D1MODE_VBLANK_STATUS 0x6534 | ||
529 | #define D2MODE_VBLANK_STATUS 0x6d34 | ||
530 | # define DxMODE_VBLANK_OCCURRED (1 << 0) | ||
531 | # define DxMODE_VBLANK_ACK (1 << 4) | ||
532 | # define DxMODE_VBLANK_STAT (1 << 12) | ||
533 | # define DxMODE_VBLANK_INTERRUPT (1 << 16) | ||
534 | # define DxMODE_VBLANK_INTERRUPT_TYPE (1 << 17) | ||
535 | #define D1MODE_VLINE_STATUS 0x653c | ||
536 | #define D2MODE_VLINE_STATUS 0x6d3c | ||
537 | # define DxMODE_VLINE_OCCURRED (1 << 0) | ||
538 | # define DxMODE_VLINE_ACK (1 << 4) | ||
539 | # define DxMODE_VLINE_STAT (1 << 12) | ||
540 | # define DxMODE_VLINE_INTERRUPT (1 << 16) | ||
541 | # define DxMODE_VLINE_INTERRUPT_TYPE (1 << 17) | ||
542 | #define DxMODE_INT_MASK 0x6540 | ||
543 | # define D1MODE_VBLANK_INT_MASK (1 << 0) | ||
544 | # define D1MODE_VLINE_INT_MASK (1 << 4) | ||
545 | # define D2MODE_VBLANK_INT_MASK (1 << 8) | ||
546 | # define D2MODE_VLINE_INT_MASK (1 << 12) | ||
547 | #define DCE3_DISP_INTERRUPT_STATUS 0x7ddc | ||
548 | # define DC_HPD1_INTERRUPT (1 << 18) | ||
549 | # define DC_HPD2_INTERRUPT (1 << 19) | ||
550 | #define DISP_INTERRUPT_STATUS 0x7edc | ||
551 | # define LB_D1_VLINE_INTERRUPT (1 << 2) | ||
552 | # define LB_D2_VLINE_INTERRUPT (1 << 3) | ||
553 | # define LB_D1_VBLANK_INTERRUPT (1 << 4) | ||
554 | # define LB_D2_VBLANK_INTERRUPT (1 << 5) | ||
555 | # define DACA_AUTODETECT_INTERRUPT (1 << 16) | ||
556 | # define DACB_AUTODETECT_INTERRUPT (1 << 17) | ||
557 | # define DC_HOT_PLUG_DETECT1_INTERRUPT (1 << 18) | ||
558 | # define DC_HOT_PLUG_DETECT2_INTERRUPT (1 << 19) | ||
559 | # define DC_I2C_SW_DONE_INTERRUPT (1 << 20) | ||
560 | # define DC_I2C_HW_DONE_INTERRUPT (1 << 21) | ||
561 | #define DISP_INTERRUPT_STATUS_CONTINUE 0x7ee8 | ||
562 | #define DCE3_DISP_INTERRUPT_STATUS_CONTINUE 0x7de8 | ||
563 | # define DC_HPD4_INTERRUPT (1 << 14) | ||
564 | # define DC_HPD4_RX_INTERRUPT (1 << 15) | ||
565 | # define DC_HPD3_INTERRUPT (1 << 28) | ||
566 | # define DC_HPD1_RX_INTERRUPT (1 << 29) | ||
567 | # define DC_HPD2_RX_INTERRUPT (1 << 30) | ||
568 | #define DCE3_DISP_INTERRUPT_STATUS_CONTINUE2 0x7dec | ||
569 | # define DC_HPD3_RX_INTERRUPT (1 << 0) | ||
570 | # define DIGA_DP_VID_STREAM_DISABLE_INTERRUPT (1 << 1) | ||
571 | # define DIGA_DP_STEER_FIFO_OVERFLOW_INTERRUPT (1 << 2) | ||
572 | # define DIGB_DP_VID_STREAM_DISABLE_INTERRUPT (1 << 3) | ||
573 | # define DIGB_DP_STEER_FIFO_OVERFLOW_INTERRUPT (1 << 4) | ||
574 | # define AUX1_SW_DONE_INTERRUPT (1 << 5) | ||
575 | # define AUX1_LS_DONE_INTERRUPT (1 << 6) | ||
576 | # define AUX2_SW_DONE_INTERRUPT (1 << 7) | ||
577 | # define AUX2_LS_DONE_INTERRUPT (1 << 8) | ||
578 | # define AUX3_SW_DONE_INTERRUPT (1 << 9) | ||
579 | # define AUX3_LS_DONE_INTERRUPT (1 << 10) | ||
580 | # define AUX4_SW_DONE_INTERRUPT (1 << 11) | ||
581 | # define AUX4_LS_DONE_INTERRUPT (1 << 12) | ||
582 | # define DIGA_DP_FAST_TRAINING_COMPLETE_INTERRUPT (1 << 13) | ||
583 | # define DIGB_DP_FAST_TRAINING_COMPLETE_INTERRUPT (1 << 14) | ||
584 | /* DCE 3.2 */ | ||
585 | # define AUX5_SW_DONE_INTERRUPT (1 << 15) | ||
586 | # define AUX5_LS_DONE_INTERRUPT (1 << 16) | ||
587 | # define AUX6_SW_DONE_INTERRUPT (1 << 17) | ||
588 | # define AUX6_LS_DONE_INTERRUPT (1 << 18) | ||
589 | # define DC_HPD5_INTERRUPT (1 << 19) | ||
590 | # define DC_HPD5_RX_INTERRUPT (1 << 20) | ||
591 | # define DC_HPD6_INTERRUPT (1 << 21) | ||
592 | # define DC_HPD6_RX_INTERRUPT (1 << 22) | ||
593 | |||
594 | #define DACA_AUTO_DETECT_CONTROL 0x7828 | ||
595 | #define DACB_AUTO_DETECT_CONTROL 0x7a28 | ||
596 | #define DCE3_DACA_AUTO_DETECT_CONTROL 0x7028 | ||
597 | #define DCE3_DACB_AUTO_DETECT_CONTROL 0x7128 | ||
598 | # define DACx_AUTODETECT_MODE(x) ((x) << 0) | ||
599 | # define DACx_AUTODETECT_MODE_NONE 0 | ||
600 | # define DACx_AUTODETECT_MODE_CONNECT 1 | ||
601 | # define DACx_AUTODETECT_MODE_DISCONNECT 2 | ||
602 | # define DACx_AUTODETECT_FRAME_TIME_COUNTER(x) ((x) << 8) | ||
603 | /* bit 18 = R/C, 17 = G/Y, 16 = B/Comp */ | ||
604 | # define DACx_AUTODETECT_CHECK_MASK(x) ((x) << 16) | ||
605 | |||
606 | #define DCE3_DACA_AUTODETECT_INT_CONTROL 0x7038 | ||
607 | #define DCE3_DACB_AUTODETECT_INT_CONTROL 0x7138 | ||
608 | #define DACA_AUTODETECT_INT_CONTROL 0x7838 | ||
609 | #define DACB_AUTODETECT_INT_CONTROL 0x7a38 | ||
610 | # define DACx_AUTODETECT_ACK (1 << 0) | ||
611 | # define DACx_AUTODETECT_INT_ENABLE (1 << 16) | ||
612 | |||
613 | #define DC_HOT_PLUG_DETECT1_CONTROL 0x7d00 | ||
614 | #define DC_HOT_PLUG_DETECT2_CONTROL 0x7d10 | ||
615 | #define DC_HOT_PLUG_DETECT3_CONTROL 0x7d24 | ||
616 | # define DC_HOT_PLUG_DETECTx_EN (1 << 0) | ||
617 | |||
618 | #define DC_HOT_PLUG_DETECT1_INT_STATUS 0x7d04 | ||
619 | #define DC_HOT_PLUG_DETECT2_INT_STATUS 0x7d14 | ||
620 | #define DC_HOT_PLUG_DETECT3_INT_STATUS 0x7d28 | ||
621 | # define DC_HOT_PLUG_DETECTx_INT_STATUS (1 << 0) | ||
622 | # define DC_HOT_PLUG_DETECTx_SENSE (1 << 1) | ||
623 | |||
624 | /* DCE 3.0 */ | ||
625 | #define DC_HPD1_INT_STATUS 0x7d00 | ||
626 | #define DC_HPD2_INT_STATUS 0x7d0c | ||
627 | #define DC_HPD3_INT_STATUS 0x7d18 | ||
628 | #define DC_HPD4_INT_STATUS 0x7d24 | ||
629 | /* DCE 3.2 */ | ||
630 | #define DC_HPD5_INT_STATUS 0x7dc0 | ||
631 | #define DC_HPD6_INT_STATUS 0x7df4 | ||
632 | # define DC_HPDx_INT_STATUS (1 << 0) | ||
633 | # define DC_HPDx_SENSE (1 << 1) | ||
634 | # define DC_HPDx_RX_INT_STATUS (1 << 8) | ||
635 | |||
636 | #define DC_HOT_PLUG_DETECT1_INT_CONTROL 0x7d08 | ||
637 | #define DC_HOT_PLUG_DETECT2_INT_CONTROL 0x7d18 | ||
638 | #define DC_HOT_PLUG_DETECT3_INT_CONTROL 0x7d2c | ||
639 | # define DC_HOT_PLUG_DETECTx_INT_ACK (1 << 0) | ||
640 | # define DC_HOT_PLUG_DETECTx_INT_POLARITY (1 << 8) | ||
641 | # define DC_HOT_PLUG_DETECTx_INT_EN (1 << 16) | ||
642 | /* DCE 3.0 */ | ||
643 | #define DC_HPD1_INT_CONTROL 0x7d04 | ||
644 | #define DC_HPD2_INT_CONTROL 0x7d10 | ||
645 | #define DC_HPD3_INT_CONTROL 0x7d1c | ||
646 | #define DC_HPD4_INT_CONTROL 0x7d28 | ||
647 | /* DCE 3.2 */ | ||
648 | #define DC_HPD5_INT_CONTROL 0x7dc4 | ||
649 | #define DC_HPD6_INT_CONTROL 0x7df8 | ||
650 | # define DC_HPDx_INT_ACK (1 << 0) | ||
651 | # define DC_HPDx_INT_POLARITY (1 << 8) | ||
652 | # define DC_HPDx_INT_EN (1 << 16) | ||
653 | # define DC_HPDx_RX_INT_ACK (1 << 20) | ||
654 | # define DC_HPDx_RX_INT_EN (1 << 24) | ||
655 | |||
656 | /* DCE 3.0 */ | ||
657 | #define DC_HPD1_CONTROL 0x7d08 | ||
658 | #define DC_HPD2_CONTROL 0x7d14 | ||
659 | #define DC_HPD3_CONTROL 0x7d20 | ||
660 | #define DC_HPD4_CONTROL 0x7d2c | ||
661 | /* DCE 3.2 */ | ||
662 | #define DC_HPD5_CONTROL 0x7dc8 | ||
663 | #define DC_HPD6_CONTROL 0x7dfc | ||
664 | # define DC_HPDx_CONNECTION_TIMER(x) ((x) << 0) | ||
665 | # define DC_HPDx_RX_INT_TIMER(x) ((x) << 16) | ||
666 | /* DCE 3.2 */ | ||
667 | # define DC_HPDx_EN (1 << 28) | ||
450 | 668 | ||
451 | /* | 669 | /* |
452 | * PM4 | 670 | * PM4 |
@@ -490,7 +708,6 @@ | |||
490 | #define PACKET3_WAIT_REG_MEM 0x3C | 708 | #define PACKET3_WAIT_REG_MEM 0x3C |
491 | #define PACKET3_MEM_WRITE 0x3D | 709 | #define PACKET3_MEM_WRITE 0x3D |
492 | #define PACKET3_INDIRECT_BUFFER 0x32 | 710 | #define PACKET3_INDIRECT_BUFFER 0x32 |
493 | #define PACKET3_CP_INTERRUPT 0x40 | ||
494 | #define PACKET3_SURFACE_SYNC 0x43 | 711 | #define PACKET3_SURFACE_SYNC 0x43 |
495 | # define PACKET3_CB0_DEST_BASE_ENA (1 << 6) | 712 | # define PACKET3_CB0_DEST_BASE_ENA (1 << 6) |
496 | # define PACKET3_TC_ACTION_ENA (1 << 23) | 713 | # define PACKET3_TC_ACTION_ENA (1 << 23) |
@@ -664,4 +881,5 @@ | |||
664 | #define S_000E60_SOFT_RESET_TSC(x) (((x) & 1) << 16) | 881 | #define S_000E60_SOFT_RESET_TSC(x) (((x) & 1) << 16) |
665 | #define S_000E60_SOFT_RESET_VMC(x) (((x) & 1) << 17) | 882 | #define S_000E60_SOFT_RESET_VMC(x) (((x) & 1) << 17) |
666 | 883 | ||
884 | #define R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480 | ||
667 | #endif | 885 | #endif |
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 757f5cd37744..a15cf9ceb9a7 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
@@ -28,8 +28,6 @@ | |||
28 | #ifndef __RADEON_H__ | 28 | #ifndef __RADEON_H__ |
29 | #define __RADEON_H__ | 29 | #define __RADEON_H__ |
30 | 30 | ||
31 | #include "radeon_object.h" | ||
32 | |||
33 | /* TODO: Here are things that needs to be done : | 31 | /* TODO: Here are things that needs to be done : |
34 | * - surface allocator & initializer : (bit like scratch reg) should | 32 | * - surface allocator & initializer : (bit like scratch reg) should |
35 | * initialize HDP_ stuff on RS600, R600, R700 hw, well anythings | 33 | * initialize HDP_ stuff on RS600, R600, R700 hw, well anythings |
@@ -67,6 +65,11 @@ | |||
67 | #include <linux/list.h> | 65 | #include <linux/list.h> |
68 | #include <linux/kref.h> | 66 | #include <linux/kref.h> |
69 | 67 | ||
68 | #include <ttm/ttm_bo_api.h> | ||
69 | #include <ttm/ttm_bo_driver.h> | ||
70 | #include <ttm/ttm_placement.h> | ||
71 | #include <ttm/ttm_module.h> | ||
72 | |||
70 | #include "radeon_family.h" | 73 | #include "radeon_family.h" |
71 | #include "radeon_mode.h" | 74 | #include "radeon_mode.h" |
72 | #include "radeon_reg.h" | 75 | #include "radeon_reg.h" |
@@ -186,76 +189,60 @@ void radeon_fence_unref(struct radeon_fence **fence); | |||
186 | * Tiling registers | 189 | * Tiling registers |
187 | */ | 190 | */ |
188 | struct radeon_surface_reg { | 191 | struct radeon_surface_reg { |
189 | struct radeon_object *robj; | 192 | struct radeon_bo *bo; |
190 | }; | 193 | }; |
191 | 194 | ||
192 | #define RADEON_GEM_MAX_SURFACES 8 | 195 | #define RADEON_GEM_MAX_SURFACES 8 |
193 | 196 | ||
194 | /* | 197 | /* |
195 | * Radeon buffer. | 198 | * TTM. |
196 | */ | 199 | */ |
197 | struct radeon_object; | 200 | struct radeon_mman { |
201 | struct ttm_bo_global_ref bo_global_ref; | ||
202 | struct ttm_global_reference mem_global_ref; | ||
203 | bool mem_global_referenced; | ||
204 | struct ttm_bo_device bdev; | ||
205 | }; | ||
206 | |||
207 | struct radeon_bo { | ||
208 | /* Protected by gem.mutex */ | ||
209 | struct list_head list; | ||
210 | /* Protected by tbo.reserved */ | ||
211 | struct ttm_buffer_object tbo; | ||
212 | struct ttm_bo_kmap_obj kmap; | ||
213 | unsigned pin_count; | ||
214 | void *kptr; | ||
215 | u32 tiling_flags; | ||
216 | u32 pitch; | ||
217 | int surface_reg; | ||
218 | /* Constant after initialization */ | ||
219 | struct radeon_device *rdev; | ||
220 | struct drm_gem_object *gobj; | ||
221 | }; | ||
198 | 222 | ||
199 | struct radeon_object_list { | 223 | struct radeon_bo_list { |
200 | struct list_head list; | 224 | struct list_head list; |
201 | struct radeon_object *robj; | 225 | struct radeon_bo *bo; |
202 | uint64_t gpu_offset; | 226 | uint64_t gpu_offset; |
203 | unsigned rdomain; | 227 | unsigned rdomain; |
204 | unsigned wdomain; | 228 | unsigned wdomain; |
205 | uint32_t tiling_flags; | 229 | u32 tiling_flags; |
206 | }; | 230 | }; |
207 | 231 | ||
208 | int radeon_object_init(struct radeon_device *rdev); | ||
209 | void radeon_object_fini(struct radeon_device *rdev); | ||
210 | int radeon_object_create(struct radeon_device *rdev, | ||
211 | struct drm_gem_object *gobj, | ||
212 | unsigned long size, | ||
213 | bool kernel, | ||
214 | uint32_t domain, | ||
215 | bool interruptible, | ||
216 | struct radeon_object **robj_ptr); | ||
217 | int radeon_object_kmap(struct radeon_object *robj, void **ptr); | ||
218 | void radeon_object_kunmap(struct radeon_object *robj); | ||
219 | void radeon_object_unref(struct radeon_object **robj); | ||
220 | int radeon_object_pin(struct radeon_object *robj, uint32_t domain, | ||
221 | uint64_t *gpu_addr); | ||
222 | void radeon_object_unpin(struct radeon_object *robj); | ||
223 | int radeon_object_wait(struct radeon_object *robj); | ||
224 | int radeon_object_busy_domain(struct radeon_object *robj, uint32_t *cur_placement); | ||
225 | int radeon_object_evict_vram(struct radeon_device *rdev); | ||
226 | int radeon_object_mmap(struct radeon_object *robj, uint64_t *offset); | ||
227 | void radeon_object_force_delete(struct radeon_device *rdev); | ||
228 | void radeon_object_list_add_object(struct radeon_object_list *lobj, | ||
229 | struct list_head *head); | ||
230 | int radeon_object_list_validate(struct list_head *head, void *fence); | ||
231 | void radeon_object_list_unvalidate(struct list_head *head); | ||
232 | void radeon_object_list_clean(struct list_head *head); | ||
233 | int radeon_object_fbdev_mmap(struct radeon_object *robj, | ||
234 | struct vm_area_struct *vma); | ||
235 | unsigned long radeon_object_size(struct radeon_object *robj); | ||
236 | void radeon_object_clear_surface_reg(struct radeon_object *robj); | ||
237 | int radeon_object_check_tiling(struct radeon_object *robj, bool has_moved, | ||
238 | bool force_drop); | ||
239 | void radeon_object_set_tiling_flags(struct radeon_object *robj, | ||
240 | uint32_t tiling_flags, uint32_t pitch); | ||
241 | void radeon_object_get_tiling_flags(struct radeon_object *robj, uint32_t *tiling_flags, uint32_t *pitch); | ||
242 | void radeon_bo_move_notify(struct ttm_buffer_object *bo, | ||
243 | struct ttm_mem_reg *mem); | ||
244 | void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo); | ||
245 | /* | 232 | /* |
246 | * GEM objects. | 233 | * GEM objects. |
247 | */ | 234 | */ |
248 | struct radeon_gem { | 235 | struct radeon_gem { |
236 | struct mutex mutex; | ||
249 | struct list_head objects; | 237 | struct list_head objects; |
250 | }; | 238 | }; |
251 | 239 | ||
252 | int radeon_gem_init(struct radeon_device *rdev); | 240 | int radeon_gem_init(struct radeon_device *rdev); |
253 | void radeon_gem_fini(struct radeon_device *rdev); | 241 | void radeon_gem_fini(struct radeon_device *rdev); |
254 | int radeon_gem_object_create(struct radeon_device *rdev, int size, | 242 | int radeon_gem_object_create(struct radeon_device *rdev, int size, |
255 | int alignment, int initial_domain, | 243 | int alignment, int initial_domain, |
256 | bool discardable, bool kernel, | 244 | bool discardable, bool kernel, |
257 | bool interruptible, | 245 | struct drm_gem_object **obj); |
258 | struct drm_gem_object **obj); | ||
259 | int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain, | 246 | int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain, |
260 | uint64_t *gpu_addr); | 247 | uint64_t *gpu_addr); |
261 | void radeon_gem_object_unpin(struct drm_gem_object *obj); | 248 | void radeon_gem_object_unpin(struct drm_gem_object *obj); |
@@ -271,7 +258,7 @@ struct radeon_gart_table_ram { | |||
271 | }; | 258 | }; |
272 | 259 | ||
273 | struct radeon_gart_table_vram { | 260 | struct radeon_gart_table_vram { |
274 | struct radeon_object *robj; | 261 | struct radeon_bo *robj; |
275 | volatile uint32_t *ptr; | 262 | volatile uint32_t *ptr; |
276 | }; | 263 | }; |
277 | 264 | ||
@@ -352,11 +339,16 @@ struct radeon_irq { | |||
352 | bool sw_int; | 339 | bool sw_int; |
353 | /* FIXME: use a define max crtc rather than hardcode it */ | 340 | /* FIXME: use a define max crtc rather than hardcode it */ |
354 | bool crtc_vblank_int[2]; | 341 | bool crtc_vblank_int[2]; |
342 | /* FIXME: use defines for max hpd/dacs */ | ||
343 | bool hpd[6]; | ||
344 | spinlock_t sw_lock; | ||
345 | int sw_refcount; | ||
355 | }; | 346 | }; |
356 | 347 | ||
357 | int radeon_irq_kms_init(struct radeon_device *rdev); | 348 | int radeon_irq_kms_init(struct radeon_device *rdev); |
358 | void radeon_irq_kms_fini(struct radeon_device *rdev); | 349 | void radeon_irq_kms_fini(struct radeon_device *rdev); |
359 | 350 | void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev); | |
351 | void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev); | ||
360 | 352 | ||
361 | /* | 353 | /* |
362 | * CP & ring. | 354 | * CP & ring. |
@@ -376,7 +368,7 @@ struct radeon_ib { | |||
376 | */ | 368 | */ |
377 | struct radeon_ib_pool { | 369 | struct radeon_ib_pool { |
378 | struct mutex mutex; | 370 | struct mutex mutex; |
379 | struct radeon_object *robj; | 371 | struct radeon_bo *robj; |
380 | struct list_head scheduled_ibs; | 372 | struct list_head scheduled_ibs; |
381 | struct radeon_ib ibs[RADEON_IB_POOL_SIZE]; | 373 | struct radeon_ib ibs[RADEON_IB_POOL_SIZE]; |
382 | bool ready; | 374 | bool ready; |
@@ -384,7 +376,7 @@ struct radeon_ib_pool { | |||
384 | }; | 376 | }; |
385 | 377 | ||
386 | struct radeon_cp { | 378 | struct radeon_cp { |
387 | struct radeon_object *ring_obj; | 379 | struct radeon_bo *ring_obj; |
388 | volatile uint32_t *ring; | 380 | volatile uint32_t *ring; |
389 | unsigned rptr; | 381 | unsigned rptr; |
390 | unsigned wptr; | 382 | unsigned wptr; |
@@ -399,8 +391,25 @@ struct radeon_cp { | |||
399 | bool ready; | 391 | bool ready; |
400 | }; | 392 | }; |
401 | 393 | ||
394 | /* | ||
395 | * R6xx+ IH ring | ||
396 | */ | ||
397 | struct r600_ih { | ||
398 | struct radeon_bo *ring_obj; | ||
399 | volatile uint32_t *ring; | ||
400 | unsigned rptr; | ||
401 | unsigned wptr; | ||
402 | unsigned wptr_old; | ||
403 | unsigned ring_size; | ||
404 | uint64_t gpu_addr; | ||
405 | uint32_t align_mask; | ||
406 | uint32_t ptr_mask; | ||
407 | spinlock_t lock; | ||
408 | bool enabled; | ||
409 | }; | ||
410 | |||
402 | struct r600_blit { | 411 | struct r600_blit { |
403 | struct radeon_object *shader_obj; | 412 | struct radeon_bo *shader_obj; |
404 | u64 shader_gpu_addr; | 413 | u64 shader_gpu_addr; |
405 | u32 vs_offset, ps_offset; | 414 | u32 vs_offset, ps_offset; |
406 | u32 state_offset; | 415 | u32 state_offset; |
@@ -430,8 +439,8 @@ void radeon_ring_fini(struct radeon_device *rdev); | |||
430 | */ | 439 | */ |
431 | struct radeon_cs_reloc { | 440 | struct radeon_cs_reloc { |
432 | struct drm_gem_object *gobj; | 441 | struct drm_gem_object *gobj; |
433 | struct radeon_object *robj; | 442 | struct radeon_bo *robj; |
434 | struct radeon_object_list lobj; | 443 | struct radeon_bo_list lobj; |
435 | uint32_t handle; | 444 | uint32_t handle; |
436 | uint32_t flags; | 445 | uint32_t flags; |
437 | }; | 446 | }; |
@@ -519,6 +528,7 @@ typedef int (*radeon_packet3_check_t)(struct radeon_cs_parser *p, | |||
519 | * AGP | 528 | * AGP |
520 | */ | 529 | */ |
521 | int radeon_agp_init(struct radeon_device *rdev); | 530 | int radeon_agp_init(struct radeon_device *rdev); |
531 | void radeon_agp_resume(struct radeon_device *rdev); | ||
522 | void radeon_agp_fini(struct radeon_device *rdev); | 532 | void radeon_agp_fini(struct radeon_device *rdev); |
523 | 533 | ||
524 | 534 | ||
@@ -526,7 +536,7 @@ void radeon_agp_fini(struct radeon_device *rdev); | |||
526 | * Writeback | 536 | * Writeback |
527 | */ | 537 | */ |
528 | struct radeon_wb { | 538 | struct radeon_wb { |
529 | struct radeon_object *wb_obj; | 539 | struct radeon_bo *wb_obj; |
530 | volatile uint32_t *wb; | 540 | volatile uint32_t *wb; |
531 | uint64_t gpu_addr; | 541 | uint64_t gpu_addr; |
532 | }; | 542 | }; |
@@ -638,6 +648,11 @@ struct radeon_asic { | |||
638 | uint32_t offset, uint32_t obj_size); | 648 | uint32_t offset, uint32_t obj_size); |
639 | int (*clear_surface_reg)(struct radeon_device *rdev, int reg); | 649 | int (*clear_surface_reg)(struct radeon_device *rdev, int reg); |
640 | void (*bandwidth_update)(struct radeon_device *rdev); | 650 | void (*bandwidth_update)(struct radeon_device *rdev); |
651 | void (*hdp_flush)(struct radeon_device *rdev); | ||
652 | void (*hpd_init)(struct radeon_device *rdev); | ||
653 | void (*hpd_fini)(struct radeon_device *rdev); | ||
654 | bool (*hpd_sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd); | ||
655 | void (*hpd_set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd); | ||
641 | }; | 656 | }; |
642 | 657 | ||
643 | /* | 658 | /* |
@@ -750,9 +765,9 @@ struct radeon_device { | |||
750 | uint8_t *bios; | 765 | uint8_t *bios; |
751 | bool is_atom_bios; | 766 | bool is_atom_bios; |
752 | uint16_t bios_header_start; | 767 | uint16_t bios_header_start; |
753 | struct radeon_object *stollen_vga_memory; | 768 | struct radeon_bo *stollen_vga_memory; |
754 | struct fb_info *fbdev_info; | 769 | struct fb_info *fbdev_info; |
755 | struct radeon_object *fbdev_robj; | 770 | struct radeon_bo *fbdev_rbo; |
756 | struct radeon_framebuffer *fbdev_rfb; | 771 | struct radeon_framebuffer *fbdev_rfb; |
757 | /* Register mmio */ | 772 | /* Register mmio */ |
758 | resource_size_t rmmio_base; | 773 | resource_size_t rmmio_base; |
@@ -790,8 +805,12 @@ struct radeon_device { | |||
790 | struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES]; | 805 | struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES]; |
791 | const struct firmware *me_fw; /* all family ME firmware */ | 806 | const struct firmware *me_fw; /* all family ME firmware */ |
792 | const struct firmware *pfp_fw; /* r6/700 PFP firmware */ | 807 | const struct firmware *pfp_fw; /* r6/700 PFP firmware */ |
808 | const struct firmware *rlc_fw; /* r6/700 RLC firmware */ | ||
793 | struct r600_blit r600_blit; | 809 | struct r600_blit r600_blit; |
794 | int msi_enabled; /* msi enabled */ | 810 | int msi_enabled; /* msi enabled */ |
811 | struct r600_ih ih; /* r6/700 interrupt ring */ | ||
812 | struct workqueue_struct *wq; | ||
813 | struct work_struct hotplug_work; | ||
795 | }; | 814 | }; |
796 | 815 | ||
797 | int radeon_device_init(struct radeon_device *rdev, | 816 | int radeon_device_init(struct radeon_device *rdev, |
@@ -828,6 +847,10 @@ static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32 | |||
828 | } | 847 | } |
829 | } | 848 | } |
830 | 849 | ||
850 | /* | ||
851 | * Cast helper | ||
852 | */ | ||
853 | #define to_radeon_fence(p) ((struct radeon_fence *)(p)) | ||
831 | 854 | ||
832 | /* | 855 | /* |
833 | * Registers read & write functions. | 856 | * Registers read & write functions. |
@@ -964,18 +987,24 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v) | |||
964 | #define radeon_get_engine_clock(rdev) (rdev)->asic->get_engine_clock((rdev)) | 987 | #define radeon_get_engine_clock(rdev) (rdev)->asic->get_engine_clock((rdev)) |
965 | #define radeon_set_engine_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e)) | 988 | #define radeon_set_engine_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e)) |
966 | #define radeon_get_memory_clock(rdev) (rdev)->asic->get_memory_clock((rdev)) | 989 | #define radeon_get_memory_clock(rdev) (rdev)->asic->get_memory_clock((rdev)) |
967 | #define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e)) | 990 | #define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_memory_clock((rdev), (e)) |
968 | #define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->set_pcie_lanes((rdev), (l)) | 991 | #define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->set_pcie_lanes((rdev), (l)) |
969 | #define radeon_set_clock_gating(rdev, e) (rdev)->asic->set_clock_gating((rdev), (e)) | 992 | #define radeon_set_clock_gating(rdev, e) (rdev)->asic->set_clock_gating((rdev), (e)) |
970 | #define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->set_surface_reg((rdev), (r), (f), (p), (o), (s))) | 993 | #define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->set_surface_reg((rdev), (r), (f), (p), (o), (s))) |
971 | #define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->clear_surface_reg((rdev), (r))) | 994 | #define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->clear_surface_reg((rdev), (r))) |
972 | #define radeon_bandwidth_update(rdev) (rdev)->asic->bandwidth_update((rdev)) | 995 | #define radeon_bandwidth_update(rdev) (rdev)->asic->bandwidth_update((rdev)) |
996 | #define radeon_hdp_flush(rdev) (rdev)->asic->hdp_flush((rdev)) | ||
997 | #define radeon_hpd_init(rdev) (rdev)->asic->hpd_init((rdev)) | ||
998 | #define radeon_hpd_fini(rdev) (rdev)->asic->hpd_fini((rdev)) | ||
999 | #define radeon_hpd_sense(rdev, hpd) (rdev)->asic->hpd_sense((rdev), (hpd)) | ||
1000 | #define radeon_hpd_set_polarity(rdev, hpd) (rdev)->asic->hpd_set_polarity((rdev), (hpd)) | ||
973 | 1001 | ||
974 | /* Common functions */ | 1002 | /* Common functions */ |
975 | extern int radeon_gart_table_vram_pin(struct radeon_device *rdev); | 1003 | extern int radeon_gart_table_vram_pin(struct radeon_device *rdev); |
976 | extern int radeon_modeset_init(struct radeon_device *rdev); | 1004 | extern int radeon_modeset_init(struct radeon_device *rdev); |
977 | extern void radeon_modeset_fini(struct radeon_device *rdev); | 1005 | extern void radeon_modeset_fini(struct radeon_device *rdev); |
978 | extern bool radeon_card_posted(struct radeon_device *rdev); | 1006 | extern bool radeon_card_posted(struct radeon_device *rdev); |
1007 | extern bool radeon_boot_test_post_card(struct radeon_device *rdev); | ||
979 | extern int radeon_clocks_init(struct radeon_device *rdev); | 1008 | extern int radeon_clocks_init(struct radeon_device *rdev); |
980 | extern void radeon_clocks_fini(struct radeon_device *rdev); | 1009 | extern void radeon_clocks_fini(struct radeon_device *rdev); |
981 | extern void radeon_scratch_init(struct radeon_device *rdev); | 1010 | extern void radeon_scratch_init(struct radeon_device *rdev); |
@@ -1020,7 +1049,7 @@ extern int r100_cp_reset(struct radeon_device *rdev); | |||
1020 | extern void r100_vga_render_disable(struct radeon_device *rdev); | 1049 | extern void r100_vga_render_disable(struct radeon_device *rdev); |
1021 | extern int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p, | 1050 | extern int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p, |
1022 | struct radeon_cs_packet *pkt, | 1051 | struct radeon_cs_packet *pkt, |
1023 | struct radeon_object *robj); | 1052 | struct radeon_bo *robj); |
1024 | extern int r100_cs_parse_packet0(struct radeon_cs_parser *p, | 1053 | extern int r100_cs_parse_packet0(struct radeon_cs_parser *p, |
1025 | struct radeon_cs_packet *pkt, | 1054 | struct radeon_cs_packet *pkt, |
1026 | const unsigned *auth, unsigned n, | 1055 | const unsigned *auth, unsigned n, |
@@ -1028,6 +1057,8 @@ extern int r100_cs_parse_packet0(struct radeon_cs_parser *p, | |||
1028 | extern int r100_cs_packet_parse(struct radeon_cs_parser *p, | 1057 | extern int r100_cs_packet_parse(struct radeon_cs_parser *p, |
1029 | struct radeon_cs_packet *pkt, | 1058 | struct radeon_cs_packet *pkt, |
1030 | unsigned idx); | 1059 | unsigned idx); |
1060 | extern void r100_enable_bm(struct radeon_device *rdev); | ||
1061 | extern void r100_set_common_regs(struct radeon_device *rdev); | ||
1031 | 1062 | ||
1032 | /* rv200,rv250,rv280 */ | 1063 | /* rv200,rv250,rv280 */ |
1033 | extern void r200_set_safe_registers(struct radeon_device *rdev); | 1064 | extern void r200_set_safe_registers(struct radeon_device *rdev); |
@@ -1103,7 +1134,14 @@ extern void r600_wb_disable(struct radeon_device *rdev); | |||
1103 | extern void r600_scratch_init(struct radeon_device *rdev); | 1134 | extern void r600_scratch_init(struct radeon_device *rdev); |
1104 | extern int r600_blit_init(struct radeon_device *rdev); | 1135 | extern int r600_blit_init(struct radeon_device *rdev); |
1105 | extern void r600_blit_fini(struct radeon_device *rdev); | 1136 | extern void r600_blit_fini(struct radeon_device *rdev); |
1106 | extern int r600_cp_init_microcode(struct radeon_device *rdev); | 1137 | extern int r600_init_microcode(struct radeon_device *rdev); |
1107 | extern int r600_gpu_reset(struct radeon_device *rdev); | 1138 | extern int r600_gpu_reset(struct radeon_device *rdev); |
1139 | /* r600 irq */ | ||
1140 | extern int r600_irq_init(struct radeon_device *rdev); | ||
1141 | extern void r600_irq_fini(struct radeon_device *rdev); | ||
1142 | extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size); | ||
1143 | extern int r600_irq_set(struct radeon_device *rdev); | ||
1144 | |||
1145 | #include "radeon_object.h" | ||
1108 | 1146 | ||
1109 | #endif | 1147 | #endif |
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c index 23ea9955ac59..54bf49a6d676 100644 --- a/drivers/gpu/drm/radeon/radeon_agp.c +++ b/drivers/gpu/drm/radeon/radeon_agp.c | |||
@@ -237,6 +237,18 @@ int radeon_agp_init(struct radeon_device *rdev) | |||
237 | #endif | 237 | #endif |
238 | } | 238 | } |
239 | 239 | ||
240 | void radeon_agp_resume(struct radeon_device *rdev) | ||
241 | { | ||
242 | #if __OS_HAS_AGP | ||
243 | int r; | ||
244 | if (rdev->flags & RADEON_IS_AGP) { | ||
245 | r = radeon_agp_init(rdev); | ||
246 | if (r) | ||
247 | dev_warn(rdev->dev, "radeon AGP reinit failed\n"); | ||
248 | } | ||
249 | #endif | ||
250 | } | ||
251 | |||
240 | void radeon_agp_fini(struct radeon_device *rdev) | 252 | void radeon_agp_fini(struct radeon_device *rdev) |
241 | { | 253 | { |
242 | #if __OS_HAS_AGP | 254 | #if __OS_HAS_AGP |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index c18fbee387d7..636116bedcb4 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h | |||
@@ -76,6 +76,12 @@ int r100_clear_surface_reg(struct radeon_device *rdev, int reg); | |||
76 | void r100_bandwidth_update(struct radeon_device *rdev); | 76 | void r100_bandwidth_update(struct radeon_device *rdev); |
77 | void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); | 77 | void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); |
78 | int r100_ring_test(struct radeon_device *rdev); | 78 | int r100_ring_test(struct radeon_device *rdev); |
79 | void r100_hdp_flush(struct radeon_device *rdev); | ||
80 | void r100_hpd_init(struct radeon_device *rdev); | ||
81 | void r100_hpd_fini(struct radeon_device *rdev); | ||
82 | bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); | ||
83 | void r100_hpd_set_polarity(struct radeon_device *rdev, | ||
84 | enum radeon_hpd_id hpd); | ||
79 | 85 | ||
80 | static struct radeon_asic r100_asic = { | 86 | static struct radeon_asic r100_asic = { |
81 | .init = &r100_init, | 87 | .init = &r100_init, |
@@ -107,6 +113,11 @@ static struct radeon_asic r100_asic = { | |||
107 | .set_surface_reg = r100_set_surface_reg, | 113 | .set_surface_reg = r100_set_surface_reg, |
108 | .clear_surface_reg = r100_clear_surface_reg, | 114 | .clear_surface_reg = r100_clear_surface_reg, |
109 | .bandwidth_update = &r100_bandwidth_update, | 115 | .bandwidth_update = &r100_bandwidth_update, |
116 | .hdp_flush = &r100_hdp_flush, | ||
117 | .hpd_init = &r100_hpd_init, | ||
118 | .hpd_fini = &r100_hpd_fini, | ||
119 | .hpd_sense = &r100_hpd_sense, | ||
120 | .hpd_set_polarity = &r100_hpd_set_polarity, | ||
110 | }; | 121 | }; |
111 | 122 | ||
112 | 123 | ||
@@ -162,6 +173,11 @@ static struct radeon_asic r300_asic = { | |||
162 | .set_surface_reg = r100_set_surface_reg, | 173 | .set_surface_reg = r100_set_surface_reg, |
163 | .clear_surface_reg = r100_clear_surface_reg, | 174 | .clear_surface_reg = r100_clear_surface_reg, |
164 | .bandwidth_update = &r100_bandwidth_update, | 175 | .bandwidth_update = &r100_bandwidth_update, |
176 | .hdp_flush = &r100_hdp_flush, | ||
177 | .hpd_init = &r100_hpd_init, | ||
178 | .hpd_fini = &r100_hpd_fini, | ||
179 | .hpd_sense = &r100_hpd_sense, | ||
180 | .hpd_set_polarity = &r100_hpd_set_polarity, | ||
165 | }; | 181 | }; |
166 | 182 | ||
167 | /* | 183 | /* |
@@ -201,6 +217,11 @@ static struct radeon_asic r420_asic = { | |||
201 | .set_surface_reg = r100_set_surface_reg, | 217 | .set_surface_reg = r100_set_surface_reg, |
202 | .clear_surface_reg = r100_clear_surface_reg, | 218 | .clear_surface_reg = r100_clear_surface_reg, |
203 | .bandwidth_update = &r100_bandwidth_update, | 219 | .bandwidth_update = &r100_bandwidth_update, |
220 | .hdp_flush = &r100_hdp_flush, | ||
221 | .hpd_init = &r100_hpd_init, | ||
222 | .hpd_fini = &r100_hpd_fini, | ||
223 | .hpd_sense = &r100_hpd_sense, | ||
224 | .hpd_set_polarity = &r100_hpd_set_polarity, | ||
204 | }; | 225 | }; |
205 | 226 | ||
206 | 227 | ||
@@ -245,6 +266,11 @@ static struct radeon_asic rs400_asic = { | |||
245 | .set_surface_reg = r100_set_surface_reg, | 266 | .set_surface_reg = r100_set_surface_reg, |
246 | .clear_surface_reg = r100_clear_surface_reg, | 267 | .clear_surface_reg = r100_clear_surface_reg, |
247 | .bandwidth_update = &r100_bandwidth_update, | 268 | .bandwidth_update = &r100_bandwidth_update, |
269 | .hdp_flush = &r100_hdp_flush, | ||
270 | .hpd_init = &r100_hpd_init, | ||
271 | .hpd_fini = &r100_hpd_fini, | ||
272 | .hpd_sense = &r100_hpd_sense, | ||
273 | .hpd_set_polarity = &r100_hpd_set_polarity, | ||
248 | }; | 274 | }; |
249 | 275 | ||
250 | 276 | ||
@@ -263,6 +289,12 @@ int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); | |||
263 | uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg); | 289 | uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg); |
264 | void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); | 290 | void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); |
265 | void rs600_bandwidth_update(struct radeon_device *rdev); | 291 | void rs600_bandwidth_update(struct radeon_device *rdev); |
292 | void rs600_hpd_init(struct radeon_device *rdev); | ||
293 | void rs600_hpd_fini(struct radeon_device *rdev); | ||
294 | bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); | ||
295 | void rs600_hpd_set_polarity(struct radeon_device *rdev, | ||
296 | enum radeon_hpd_id hpd); | ||
297 | |||
266 | static struct radeon_asic rs600_asic = { | 298 | static struct radeon_asic rs600_asic = { |
267 | .init = &rs600_init, | 299 | .init = &rs600_init, |
268 | .fini = &rs600_fini, | 300 | .fini = &rs600_fini, |
@@ -291,6 +323,11 @@ static struct radeon_asic rs600_asic = { | |||
291 | .set_pcie_lanes = NULL, | 323 | .set_pcie_lanes = NULL, |
292 | .set_clock_gating = &radeon_atom_set_clock_gating, | 324 | .set_clock_gating = &radeon_atom_set_clock_gating, |
293 | .bandwidth_update = &rs600_bandwidth_update, | 325 | .bandwidth_update = &rs600_bandwidth_update, |
326 | .hdp_flush = &r100_hdp_flush, | ||
327 | .hpd_init = &rs600_hpd_init, | ||
328 | .hpd_fini = &rs600_hpd_fini, | ||
329 | .hpd_sense = &rs600_hpd_sense, | ||
330 | .hpd_set_polarity = &rs600_hpd_set_polarity, | ||
294 | }; | 331 | }; |
295 | 332 | ||
296 | 333 | ||
@@ -334,6 +371,11 @@ static struct radeon_asic rs690_asic = { | |||
334 | .set_surface_reg = r100_set_surface_reg, | 371 | .set_surface_reg = r100_set_surface_reg, |
335 | .clear_surface_reg = r100_clear_surface_reg, | 372 | .clear_surface_reg = r100_clear_surface_reg, |
336 | .bandwidth_update = &rs690_bandwidth_update, | 373 | .bandwidth_update = &rs690_bandwidth_update, |
374 | .hdp_flush = &r100_hdp_flush, | ||
375 | .hpd_init = &rs600_hpd_init, | ||
376 | .hpd_fini = &rs600_hpd_fini, | ||
377 | .hpd_sense = &rs600_hpd_sense, | ||
378 | .hpd_set_polarity = &rs600_hpd_set_polarity, | ||
337 | }; | 379 | }; |
338 | 380 | ||
339 | 381 | ||
@@ -381,6 +423,11 @@ static struct radeon_asic rv515_asic = { | |||
381 | .set_surface_reg = r100_set_surface_reg, | 423 | .set_surface_reg = r100_set_surface_reg, |
382 | .clear_surface_reg = r100_clear_surface_reg, | 424 | .clear_surface_reg = r100_clear_surface_reg, |
383 | .bandwidth_update = &rv515_bandwidth_update, | 425 | .bandwidth_update = &rv515_bandwidth_update, |
426 | .hdp_flush = &r100_hdp_flush, | ||
427 | .hpd_init = &rs600_hpd_init, | ||
428 | .hpd_fini = &rs600_hpd_fini, | ||
429 | .hpd_sense = &rs600_hpd_sense, | ||
430 | .hpd_set_polarity = &rs600_hpd_set_polarity, | ||
384 | }; | 431 | }; |
385 | 432 | ||
386 | 433 | ||
@@ -419,6 +466,11 @@ static struct radeon_asic r520_asic = { | |||
419 | .set_surface_reg = r100_set_surface_reg, | 466 | .set_surface_reg = r100_set_surface_reg, |
420 | .clear_surface_reg = r100_clear_surface_reg, | 467 | .clear_surface_reg = r100_clear_surface_reg, |
421 | .bandwidth_update = &rv515_bandwidth_update, | 468 | .bandwidth_update = &rv515_bandwidth_update, |
469 | .hdp_flush = &r100_hdp_flush, | ||
470 | .hpd_init = &rs600_hpd_init, | ||
471 | .hpd_fini = &rs600_hpd_fini, | ||
472 | .hpd_sense = &rs600_hpd_sense, | ||
473 | .hpd_set_polarity = &rs600_hpd_set_polarity, | ||
422 | }; | 474 | }; |
423 | 475 | ||
424 | /* | 476 | /* |
@@ -455,6 +507,12 @@ int r600_ring_test(struct radeon_device *rdev); | |||
455 | int r600_copy_blit(struct radeon_device *rdev, | 507 | int r600_copy_blit(struct radeon_device *rdev, |
456 | uint64_t src_offset, uint64_t dst_offset, | 508 | uint64_t src_offset, uint64_t dst_offset, |
457 | unsigned num_pages, struct radeon_fence *fence); | 509 | unsigned num_pages, struct radeon_fence *fence); |
510 | void r600_hdp_flush(struct radeon_device *rdev); | ||
511 | void r600_hpd_init(struct radeon_device *rdev); | ||
512 | void r600_hpd_fini(struct radeon_device *rdev); | ||
513 | bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); | ||
514 | void r600_hpd_set_polarity(struct radeon_device *rdev, | ||
515 | enum radeon_hpd_id hpd); | ||
458 | 516 | ||
459 | static struct radeon_asic r600_asic = { | 517 | static struct radeon_asic r600_asic = { |
460 | .init = &r600_init, | 518 | .init = &r600_init, |
@@ -470,6 +528,7 @@ static struct radeon_asic r600_asic = { | |||
470 | .ring_ib_execute = &r600_ring_ib_execute, | 528 | .ring_ib_execute = &r600_ring_ib_execute, |
471 | .irq_set = &r600_irq_set, | 529 | .irq_set = &r600_irq_set, |
472 | .irq_process = &r600_irq_process, | 530 | .irq_process = &r600_irq_process, |
531 | .get_vblank_counter = &rs600_get_vblank_counter, | ||
473 | .fence_ring_emit = &r600_fence_ring_emit, | 532 | .fence_ring_emit = &r600_fence_ring_emit, |
474 | .cs_parse = &r600_cs_parse, | 533 | .cs_parse = &r600_cs_parse, |
475 | .copy_blit = &r600_copy_blit, | 534 | .copy_blit = &r600_copy_blit, |
@@ -484,6 +543,11 @@ static struct radeon_asic r600_asic = { | |||
484 | .set_surface_reg = r600_set_surface_reg, | 543 | .set_surface_reg = r600_set_surface_reg, |
485 | .clear_surface_reg = r600_clear_surface_reg, | 544 | .clear_surface_reg = r600_clear_surface_reg, |
486 | .bandwidth_update = &rv515_bandwidth_update, | 545 | .bandwidth_update = &rv515_bandwidth_update, |
546 | .hdp_flush = &r600_hdp_flush, | ||
547 | .hpd_init = &r600_hpd_init, | ||
548 | .hpd_fini = &r600_hpd_fini, | ||
549 | .hpd_sense = &r600_hpd_sense, | ||
550 | .hpd_set_polarity = &r600_hpd_set_polarity, | ||
487 | }; | 551 | }; |
488 | 552 | ||
489 | /* | 553 | /* |
@@ -509,6 +573,7 @@ static struct radeon_asic rv770_asic = { | |||
509 | .ring_ib_execute = &r600_ring_ib_execute, | 573 | .ring_ib_execute = &r600_ring_ib_execute, |
510 | .irq_set = &r600_irq_set, | 574 | .irq_set = &r600_irq_set, |
511 | .irq_process = &r600_irq_process, | 575 | .irq_process = &r600_irq_process, |
576 | .get_vblank_counter = &rs600_get_vblank_counter, | ||
512 | .fence_ring_emit = &r600_fence_ring_emit, | 577 | .fence_ring_emit = &r600_fence_ring_emit, |
513 | .cs_parse = &r600_cs_parse, | 578 | .cs_parse = &r600_cs_parse, |
514 | .copy_blit = &r600_copy_blit, | 579 | .copy_blit = &r600_copy_blit, |
@@ -523,6 +588,11 @@ static struct radeon_asic rv770_asic = { | |||
523 | .set_surface_reg = r600_set_surface_reg, | 588 | .set_surface_reg = r600_set_surface_reg, |
524 | .clear_surface_reg = r600_clear_surface_reg, | 589 | .clear_surface_reg = r600_clear_surface_reg, |
525 | .bandwidth_update = &rv515_bandwidth_update, | 590 | .bandwidth_update = &rv515_bandwidth_update, |
591 | .hdp_flush = &r600_hdp_flush, | ||
592 | .hpd_init = &r600_hpd_init, | ||
593 | .hpd_fini = &r600_hpd_fini, | ||
594 | .hpd_sense = &r600_hpd_sense, | ||
595 | .hpd_set_polarity = &r600_hpd_set_polarity, | ||
526 | }; | 596 | }; |
527 | 597 | ||
528 | #endif | 598 | #endif |
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 2ed88a820935..d7b0feb7d47f 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c | |||
@@ -47,7 +47,8 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
47 | int connector_type, | 47 | int connector_type, |
48 | struct radeon_i2c_bus_rec *i2c_bus, | 48 | struct radeon_i2c_bus_rec *i2c_bus, |
49 | bool linkb, uint32_t igp_lane_info, | 49 | bool linkb, uint32_t igp_lane_info, |
50 | uint16_t connector_object_id); | 50 | uint16_t connector_object_id, |
51 | struct radeon_hpd *hpd); | ||
51 | 52 | ||
52 | /* from radeon_legacy_encoder.c */ | 53 | /* from radeon_legacy_encoder.c */ |
53 | extern void | 54 | extern void |
@@ -60,12 +61,11 @@ union atom_supported_devices { | |||
60 | struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 info_2d1; | 61 | struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 info_2d1; |
61 | }; | 62 | }; |
62 | 63 | ||
63 | static inline struct radeon_i2c_bus_rec radeon_lookup_gpio(struct drm_device | 64 | static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rdev, |
64 | *dev, uint8_t id) | 65 | uint8_t id) |
65 | { | 66 | { |
66 | struct radeon_device *rdev = dev->dev_private; | ||
67 | struct atom_context *ctx = rdev->mode_info.atom_context; | 67 | struct atom_context *ctx = rdev->mode_info.atom_context; |
68 | ATOM_GPIO_I2C_ASSIGMENT gpio; | 68 | ATOM_GPIO_I2C_ASSIGMENT *gpio; |
69 | struct radeon_i2c_bus_rec i2c; | 69 | struct radeon_i2c_bus_rec i2c; |
70 | int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info); | 70 | int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info); |
71 | struct _ATOM_GPIO_I2C_INFO *i2c_info; | 71 | struct _ATOM_GPIO_I2C_INFO *i2c_info; |
@@ -78,34 +78,116 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_gpio(struct drm_device | |||
78 | 78 | ||
79 | i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset); | 79 | i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset); |
80 | 80 | ||
81 | gpio = i2c_info->asGPIO_Info[id]; | 81 | gpio = &i2c_info->asGPIO_Info[id]; |
82 | 82 | ||
83 | i2c.mask_clk_reg = le16_to_cpu(gpio.usClkMaskRegisterIndex) * 4; | 83 | i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; |
84 | i2c.mask_data_reg = le16_to_cpu(gpio.usDataMaskRegisterIndex) * 4; | 84 | i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4; |
85 | i2c.put_clk_reg = le16_to_cpu(gpio.usClkEnRegisterIndex) * 4; | 85 | i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4; |
86 | i2c.put_data_reg = le16_to_cpu(gpio.usDataEnRegisterIndex) * 4; | 86 | i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4; |
87 | i2c.get_clk_reg = le16_to_cpu(gpio.usClkY_RegisterIndex) * 4; | 87 | i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4; |
88 | i2c.get_data_reg = le16_to_cpu(gpio.usDataY_RegisterIndex) * 4; | 88 | i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4; |
89 | i2c.a_clk_reg = le16_to_cpu(gpio.usClkA_RegisterIndex) * 4; | 89 | i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4; |
90 | i2c.a_data_reg = le16_to_cpu(gpio.usDataA_RegisterIndex) * 4; | 90 | i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4; |
91 | i2c.mask_clk_mask = (1 << gpio.ucClkMaskShift); | 91 | i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift); |
92 | i2c.mask_data_mask = (1 << gpio.ucDataMaskShift); | 92 | i2c.mask_data_mask = (1 << gpio->ucDataMaskShift); |
93 | i2c.put_clk_mask = (1 << gpio.ucClkEnShift); | 93 | i2c.en_clk_mask = (1 << gpio->ucClkEnShift); |
94 | i2c.put_data_mask = (1 << gpio.ucDataEnShift); | 94 | i2c.en_data_mask = (1 << gpio->ucDataEnShift); |
95 | i2c.get_clk_mask = (1 << gpio.ucClkY_Shift); | 95 | i2c.y_clk_mask = (1 << gpio->ucClkY_Shift); |
96 | i2c.get_data_mask = (1 << gpio.ucDataY_Shift); | 96 | i2c.y_data_mask = (1 << gpio->ucDataY_Shift); |
97 | i2c.a_clk_mask = (1 << gpio.ucClkA_Shift); | 97 | i2c.a_clk_mask = (1 << gpio->ucClkA_Shift); |
98 | i2c.a_data_mask = (1 << gpio.ucDataA_Shift); | 98 | i2c.a_data_mask = (1 << gpio->ucDataA_Shift); |
99 | |||
100 | if (gpio->sucI2cId.sbfAccess.bfHW_Capable) | ||
101 | i2c.hw_capable = true; | ||
102 | else | ||
103 | i2c.hw_capable = false; | ||
104 | |||
105 | if (gpio->sucI2cId.ucAccess == 0xa0) | ||
106 | i2c.mm_i2c = true; | ||
107 | else | ||
108 | i2c.mm_i2c = false; | ||
109 | |||
110 | i2c.i2c_id = gpio->sucI2cId.ucAccess; | ||
111 | |||
99 | i2c.valid = true; | 112 | i2c.valid = true; |
100 | 113 | ||
101 | return i2c; | 114 | return i2c; |
102 | } | 115 | } |
103 | 116 | ||
117 | static inline struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev, | ||
118 | u8 id) | ||
119 | { | ||
120 | struct atom_context *ctx = rdev->mode_info.atom_context; | ||
121 | struct radeon_gpio_rec gpio; | ||
122 | int index = GetIndexIntoMasterTable(DATA, GPIO_Pin_LUT); | ||
123 | struct _ATOM_GPIO_PIN_LUT *gpio_info; | ||
124 | ATOM_GPIO_PIN_ASSIGNMENT *pin; | ||
125 | u16 data_offset, size; | ||
126 | int i, num_indices; | ||
127 | |||
128 | memset(&gpio, 0, sizeof(struct radeon_gpio_rec)); | ||
129 | gpio.valid = false; | ||
130 | |||
131 | atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset); | ||
132 | |||
133 | gpio_info = (struct _ATOM_GPIO_PIN_LUT *)(ctx->bios + data_offset); | ||
134 | |||
135 | num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / sizeof(ATOM_GPIO_PIN_ASSIGNMENT); | ||
136 | |||
137 | for (i = 0; i < num_indices; i++) { | ||
138 | pin = &gpio_info->asGPIO_Pin[i]; | ||
139 | if (id == pin->ucGPIO_ID) { | ||
140 | gpio.id = pin->ucGPIO_ID; | ||
141 | gpio.reg = pin->usGpioPin_AIndex * 4; | ||
142 | gpio.mask = (1 << pin->ucGpioPinBitShift); | ||
143 | gpio.valid = true; | ||
144 | break; | ||
145 | } | ||
146 | } | ||
147 | |||
148 | return gpio; | ||
149 | } | ||
150 | |||
151 | static struct radeon_hpd radeon_atom_get_hpd_info_from_gpio(struct radeon_device *rdev, | ||
152 | struct radeon_gpio_rec *gpio) | ||
153 | { | ||
154 | struct radeon_hpd hpd; | ||
155 | hpd.gpio = *gpio; | ||
156 | if (gpio->reg == AVIVO_DC_GPIO_HPD_A) { | ||
157 | switch(gpio->mask) { | ||
158 | case (1 << 0): | ||
159 | hpd.hpd = RADEON_HPD_1; | ||
160 | break; | ||
161 | case (1 << 8): | ||
162 | hpd.hpd = RADEON_HPD_2; | ||
163 | break; | ||
164 | case (1 << 16): | ||
165 | hpd.hpd = RADEON_HPD_3; | ||
166 | break; | ||
167 | case (1 << 24): | ||
168 | hpd.hpd = RADEON_HPD_4; | ||
169 | break; | ||
170 | case (1 << 26): | ||
171 | hpd.hpd = RADEON_HPD_5; | ||
172 | break; | ||
173 | case (1 << 28): | ||
174 | hpd.hpd = RADEON_HPD_6; | ||
175 | break; | ||
176 | default: | ||
177 | hpd.hpd = RADEON_HPD_NONE; | ||
178 | break; | ||
179 | } | ||
180 | } else | ||
181 | hpd.hpd = RADEON_HPD_NONE; | ||
182 | return hpd; | ||
183 | } | ||
184 | |||
104 | static bool radeon_atom_apply_quirks(struct drm_device *dev, | 185 | static bool radeon_atom_apply_quirks(struct drm_device *dev, |
105 | uint32_t supported_device, | 186 | uint32_t supported_device, |
106 | int *connector_type, | 187 | int *connector_type, |
107 | struct radeon_i2c_bus_rec *i2c_bus, | 188 | struct radeon_i2c_bus_rec *i2c_bus, |
108 | uint16_t *line_mux) | 189 | uint16_t *line_mux, |
190 | struct radeon_hpd *hpd) | ||
109 | { | 191 | { |
110 | 192 | ||
111 | /* Asus M2A-VM HDMI board lists the DVI port as HDMI */ | 193 | /* Asus M2A-VM HDMI board lists the DVI port as HDMI */ |
@@ -135,6 +217,23 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, | |||
135 | } | 217 | } |
136 | } | 218 | } |
137 | 219 | ||
220 | /* HIS X1300 is DVI+VGA, not DVI+DVI */ | ||
221 | if ((dev->pdev->device == 0x7146) && | ||
222 | (dev->pdev->subsystem_vendor == 0x17af) && | ||
223 | (dev->pdev->subsystem_device == 0x2058)) { | ||
224 | if (supported_device == ATOM_DEVICE_DFP1_SUPPORT) | ||
225 | return false; | ||
226 | } | ||
227 | |||
228 | /* Gigabyte X1300 is DVI+VGA, not DVI+DVI */ | ||
229 | if ((dev->pdev->device == 0x7142) && | ||
230 | (dev->pdev->subsystem_vendor == 0x1458) && | ||
231 | (dev->pdev->subsystem_device == 0x2134)) { | ||
232 | if (supported_device == ATOM_DEVICE_DFP1_SUPPORT) | ||
233 | return false; | ||
234 | } | ||
235 | |||
236 | |||
138 | /* Funky macbooks */ | 237 | /* Funky macbooks */ |
139 | if ((dev->pdev->device == 0x71C5) && | 238 | if ((dev->pdev->device == 0x71C5) && |
140 | (dev->pdev->subsystem_vendor == 0x106b) && | 239 | (dev->pdev->subsystem_vendor == 0x106b) && |
@@ -172,6 +271,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, | |||
172 | } | 271 | } |
173 | } | 272 | } |
174 | 273 | ||
274 | /* Acer laptop reports DVI-D as DVI-I */ | ||
275 | if ((dev->pdev->device == 0x95c4) && | ||
276 | (dev->pdev->subsystem_vendor == 0x1025) && | ||
277 | (dev->pdev->subsystem_device == 0x013c)) { | ||
278 | if ((*connector_type == DRM_MODE_CONNECTOR_DVII) && | ||
279 | (supported_device == ATOM_DEVICE_DFP1_SUPPORT)) | ||
280 | *connector_type = DRM_MODE_CONNECTOR_DVID; | ||
281 | } | ||
282 | |||
175 | return true; | 283 | return true; |
176 | } | 284 | } |
177 | 285 | ||
@@ -240,16 +348,18 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) | |||
240 | struct radeon_mode_info *mode_info = &rdev->mode_info; | 348 | struct radeon_mode_info *mode_info = &rdev->mode_info; |
241 | struct atom_context *ctx = mode_info->atom_context; | 349 | struct atom_context *ctx = mode_info->atom_context; |
242 | int index = GetIndexIntoMasterTable(DATA, Object_Header); | 350 | int index = GetIndexIntoMasterTable(DATA, Object_Header); |
243 | uint16_t size, data_offset; | 351 | u16 size, data_offset; |
244 | uint8_t frev, crev, line_mux = 0; | 352 | u8 frev, crev; |
245 | ATOM_CONNECTOR_OBJECT_TABLE *con_obj; | 353 | ATOM_CONNECTOR_OBJECT_TABLE *con_obj; |
246 | ATOM_DISPLAY_OBJECT_PATH_TABLE *path_obj; | 354 | ATOM_DISPLAY_OBJECT_PATH_TABLE *path_obj; |
247 | ATOM_OBJECT_HEADER *obj_header; | 355 | ATOM_OBJECT_HEADER *obj_header; |
248 | int i, j, path_size, device_support; | 356 | int i, j, path_size, device_support; |
249 | int connector_type; | 357 | int connector_type; |
250 | uint16_t igp_lane_info, conn_id, connector_object_id; | 358 | u16 igp_lane_info, conn_id, connector_object_id; |
251 | bool linkb; | 359 | bool linkb; |
252 | struct radeon_i2c_bus_rec ddc_bus; | 360 | struct radeon_i2c_bus_rec ddc_bus; |
361 | struct radeon_gpio_rec gpio; | ||
362 | struct radeon_hpd hpd; | ||
253 | 363 | ||
254 | atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset); | 364 | atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset); |
255 | 365 | ||
@@ -276,7 +386,6 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) | |||
276 | path = (ATOM_DISPLAY_OBJECT_PATH *) addr; | 386 | path = (ATOM_DISPLAY_OBJECT_PATH *) addr; |
277 | path_size += le16_to_cpu(path->usSize); | 387 | path_size += le16_to_cpu(path->usSize); |
278 | linkb = false; | 388 | linkb = false; |
279 | |||
280 | if (device_support & le16_to_cpu(path->usDeviceTag)) { | 389 | if (device_support & le16_to_cpu(path->usDeviceTag)) { |
281 | uint8_t con_obj_id, con_obj_num, con_obj_type; | 390 | uint8_t con_obj_id, con_obj_num, con_obj_type; |
282 | 391 | ||
@@ -377,10 +486,9 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) | |||
377 | } | 486 | } |
378 | } | 487 | } |
379 | 488 | ||
380 | /* look up gpio for ddc */ | 489 | /* look up gpio for ddc, hpd */ |
381 | if ((le16_to_cpu(path->usDeviceTag) & | 490 | if ((le16_to_cpu(path->usDeviceTag) & |
382 | (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) | 491 | (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) == 0) { |
383 | == 0) { | ||
384 | for (j = 0; j < con_obj->ucNumberOfObjects; j++) { | 492 | for (j = 0; j < con_obj->ucNumberOfObjects; j++) { |
385 | if (le16_to_cpu(path->usConnObjectId) == | 493 | if (le16_to_cpu(path->usConnObjectId) == |
386 | le16_to_cpu(con_obj->asObjects[j]. | 494 | le16_to_cpu(con_obj->asObjects[j]. |
@@ -394,21 +502,31 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) | |||
394 | asObjects[j]. | 502 | asObjects[j]. |
395 | usRecordOffset)); | 503 | usRecordOffset)); |
396 | ATOM_I2C_RECORD *i2c_record; | 504 | ATOM_I2C_RECORD *i2c_record; |
505 | ATOM_HPD_INT_RECORD *hpd_record; | ||
506 | hpd.hpd = RADEON_HPD_NONE; | ||
397 | 507 | ||
398 | while (record->ucRecordType > 0 | 508 | while (record->ucRecordType > 0 |
399 | && record-> | 509 | && record-> |
400 | ucRecordType <= | 510 | ucRecordType <= |
401 | ATOM_MAX_OBJECT_RECORD_NUMBER) { | 511 | ATOM_MAX_OBJECT_RECORD_NUMBER) { |
402 | switch (record-> | 512 | switch (record->ucRecordType) { |
403 | ucRecordType) { | ||
404 | case ATOM_I2C_RECORD_TYPE: | 513 | case ATOM_I2C_RECORD_TYPE: |
405 | i2c_record = | 514 | i2c_record = |
406 | (ATOM_I2C_RECORD | 515 | (ATOM_I2C_RECORD *) |
407 | *) record; | 516 | record; |
408 | line_mux = | 517 | ddc_bus = radeon_lookup_i2c_gpio(rdev, |
409 | i2c_record-> | 518 | i2c_record-> |
410 | sucI2cId. | 519 | sucI2cId. |
411 | bfI2C_LineMux; | 520 | bfI2C_LineMux); |
521 | break; | ||
522 | case ATOM_HPD_INT_RECORD_TYPE: | ||
523 | hpd_record = | ||
524 | (ATOM_HPD_INT_RECORD *) | ||
525 | record; | ||
526 | gpio = radeon_lookup_gpio(rdev, | ||
527 | hpd_record->ucHPDIntGPIOID); | ||
528 | hpd = radeon_atom_get_hpd_info_from_gpio(rdev, &gpio); | ||
529 | hpd.plugged_state = hpd_record->ucPlugged_PinState; | ||
412 | break; | 530 | break; |
413 | } | 531 | } |
414 | record = | 532 | record = |
@@ -421,24 +539,16 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) | |||
421 | break; | 539 | break; |
422 | } | 540 | } |
423 | } | 541 | } |
424 | } else | 542 | } else { |
425 | line_mux = 0; | 543 | hpd.hpd = RADEON_HPD_NONE; |
426 | |||
427 | if ((le16_to_cpu(path->usDeviceTag) == | ||
428 | ATOM_DEVICE_TV1_SUPPORT) | ||
429 | || (le16_to_cpu(path->usDeviceTag) == | ||
430 | ATOM_DEVICE_TV2_SUPPORT) | ||
431 | || (le16_to_cpu(path->usDeviceTag) == | ||
432 | ATOM_DEVICE_CV_SUPPORT)) | ||
433 | ddc_bus.valid = false; | 544 | ddc_bus.valid = false; |
434 | else | 545 | } |
435 | ddc_bus = radeon_lookup_gpio(dev, line_mux); | ||
436 | 546 | ||
437 | conn_id = le16_to_cpu(path->usConnObjectId); | 547 | conn_id = le16_to_cpu(path->usConnObjectId); |
438 | 548 | ||
439 | if (!radeon_atom_apply_quirks | 549 | if (!radeon_atom_apply_quirks |
440 | (dev, le16_to_cpu(path->usDeviceTag), &connector_type, | 550 | (dev, le16_to_cpu(path->usDeviceTag), &connector_type, |
441 | &ddc_bus, &conn_id)) | 551 | &ddc_bus, &conn_id, &hpd)) |
442 | continue; | 552 | continue; |
443 | 553 | ||
444 | radeon_add_atom_connector(dev, | 554 | radeon_add_atom_connector(dev, |
@@ -447,7 +557,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) | |||
447 | usDeviceTag), | 557 | usDeviceTag), |
448 | connector_type, &ddc_bus, | 558 | connector_type, &ddc_bus, |
449 | linkb, igp_lane_info, | 559 | linkb, igp_lane_info, |
450 | connector_object_id); | 560 | connector_object_id, |
561 | &hpd); | ||
451 | 562 | ||
452 | } | 563 | } |
453 | } | 564 | } |
@@ -502,6 +613,7 @@ struct bios_connector { | |||
502 | uint16_t devices; | 613 | uint16_t devices; |
503 | int connector_type; | 614 | int connector_type; |
504 | struct radeon_i2c_bus_rec ddc_bus; | 615 | struct radeon_i2c_bus_rec ddc_bus; |
616 | struct radeon_hpd hpd; | ||
505 | }; | 617 | }; |
506 | 618 | ||
507 | bool radeon_get_atom_connector_info_from_supported_devices_table(struct | 619 | bool radeon_get_atom_connector_info_from_supported_devices_table(struct |
@@ -517,7 +629,7 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct | |||
517 | uint16_t device_support; | 629 | uint16_t device_support; |
518 | uint8_t dac; | 630 | uint8_t dac; |
519 | union atom_supported_devices *supported_devices; | 631 | union atom_supported_devices *supported_devices; |
520 | int i, j; | 632 | int i, j, max_device; |
521 | struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE]; | 633 | struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE]; |
522 | 634 | ||
523 | atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset); | 635 | atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset); |
@@ -527,7 +639,12 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct | |||
527 | 639 | ||
528 | device_support = le16_to_cpu(supported_devices->info.usDeviceSupport); | 640 | device_support = le16_to_cpu(supported_devices->info.usDeviceSupport); |
529 | 641 | ||
530 | for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) { | 642 | if (frev > 1) |
643 | max_device = ATOM_MAX_SUPPORTED_DEVICE; | ||
644 | else | ||
645 | max_device = ATOM_MAX_SUPPORTED_DEVICE_INFO; | ||
646 | |||
647 | for (i = 0; i < max_device; i++) { | ||
531 | ATOM_CONNECTOR_INFO_I2C ci = | 648 | ATOM_CONNECTOR_INFO_I2C ci = |
532 | supported_devices->info.asConnInfo[i]; | 649 | supported_devices->info.asConnInfo[i]; |
533 | 650 | ||
@@ -582,8 +699,30 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct | |||
582 | bios_connectors[i].line_mux = 52; | 699 | bios_connectors[i].line_mux = 52; |
583 | } else | 700 | } else |
584 | bios_connectors[i].ddc_bus = | 701 | bios_connectors[i].ddc_bus = |
585 | radeon_lookup_gpio(dev, | 702 | radeon_lookup_i2c_gpio(rdev, |
586 | bios_connectors[i].line_mux); | 703 | bios_connectors[i].line_mux); |
704 | |||
705 | if ((crev > 1) && (frev > 1)) { | ||
706 | u8 isb = supported_devices->info_2d1.asIntSrcInfo[i].ucIntSrcBitmap; | ||
707 | switch (isb) { | ||
708 | case 0x4: | ||
709 | bios_connectors[i].hpd.hpd = RADEON_HPD_1; | ||
710 | break; | ||
711 | case 0xa: | ||
712 | bios_connectors[i].hpd.hpd = RADEON_HPD_2; | ||
713 | break; | ||
714 | default: | ||
715 | bios_connectors[i].hpd.hpd = RADEON_HPD_NONE; | ||
716 | break; | ||
717 | } | ||
718 | } else { | ||
719 | if (i == ATOM_DEVICE_DFP1_INDEX) | ||
720 | bios_connectors[i].hpd.hpd = RADEON_HPD_1; | ||
721 | else if (i == ATOM_DEVICE_DFP2_INDEX) | ||
722 | bios_connectors[i].hpd.hpd = RADEON_HPD_2; | ||
723 | else | ||
724 | bios_connectors[i].hpd.hpd = RADEON_HPD_NONE; | ||
725 | } | ||
587 | 726 | ||
588 | /* Always set the connector type to VGA for CRT1/CRT2. if they are | 727 | /* Always set the connector type to VGA for CRT1/CRT2. if they are |
589 | * shared with a DVI port, we'll pick up the DVI connector when we | 728 | * shared with a DVI port, we'll pick up the DVI connector when we |
@@ -595,7 +734,8 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct | |||
595 | 734 | ||
596 | if (!radeon_atom_apply_quirks | 735 | if (!radeon_atom_apply_quirks |
597 | (dev, (1 << i), &bios_connectors[i].connector_type, | 736 | (dev, (1 << i), &bios_connectors[i].connector_type, |
598 | &bios_connectors[i].ddc_bus, &bios_connectors[i].line_mux)) | 737 | &bios_connectors[i].ddc_bus, &bios_connectors[i].line_mux, |
738 | &bios_connectors[i].hpd)) | ||
599 | continue; | 739 | continue; |
600 | 740 | ||
601 | bios_connectors[i].valid = true; | 741 | bios_connectors[i].valid = true; |
@@ -617,9 +757,9 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct | |||
617 | } | 757 | } |
618 | 758 | ||
619 | /* combine shared connectors */ | 759 | /* combine shared connectors */ |
620 | for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) { | 760 | for (i = 0; i < max_device; i++) { |
621 | if (bios_connectors[i].valid) { | 761 | if (bios_connectors[i].valid) { |
622 | for (j = 0; j < ATOM_MAX_SUPPORTED_DEVICE; j++) { | 762 | for (j = 0; j < max_device; j++) { |
623 | if (bios_connectors[j].valid && (i != j)) { | 763 | if (bios_connectors[j].valid && (i != j)) { |
624 | if (bios_connectors[i].line_mux == | 764 | if (bios_connectors[i].line_mux == |
625 | bios_connectors[j].line_mux) { | 765 | bios_connectors[j].line_mux) { |
@@ -643,6 +783,10 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct | |||
643 | bios_connectors[i]. | 783 | bios_connectors[i]. |
644 | connector_type = | 784 | connector_type = |
645 | DRM_MODE_CONNECTOR_DVII; | 785 | DRM_MODE_CONNECTOR_DVII; |
786 | if (bios_connectors[j].devices & | ||
787 | (ATOM_DEVICE_DFP_SUPPORT)) | ||
788 | bios_connectors[i].hpd = | ||
789 | bios_connectors[j].hpd; | ||
646 | bios_connectors[j]. | 790 | bios_connectors[j]. |
647 | valid = false; | 791 | valid = false; |
648 | } | 792 | } |
@@ -653,7 +797,7 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct | |||
653 | } | 797 | } |
654 | 798 | ||
655 | /* add the connectors */ | 799 | /* add the connectors */ |
656 | for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) { | 800 | for (i = 0; i < max_device; i++) { |
657 | if (bios_connectors[i].valid) { | 801 | if (bios_connectors[i].valid) { |
658 | uint16_t connector_object_id = | 802 | uint16_t connector_object_id = |
659 | atombios_get_connector_object_id(dev, | 803 | atombios_get_connector_object_id(dev, |
@@ -666,7 +810,8 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct | |||
666 | connector_type, | 810 | connector_type, |
667 | &bios_connectors[i].ddc_bus, | 811 | &bios_connectors[i].ddc_bus, |
668 | false, 0, | 812 | false, 0, |
669 | connector_object_id); | 813 | connector_object_id, |
814 | &bios_connectors[i].hpd); | ||
670 | } | 815 | } |
671 | } | 816 | } |
672 | 817 | ||
@@ -901,7 +1046,7 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct | |||
901 | struct radeon_device *rdev = dev->dev_private; | 1046 | struct radeon_device *rdev = dev->dev_private; |
902 | struct radeon_mode_info *mode_info = &rdev->mode_info; | 1047 | struct radeon_mode_info *mode_info = &rdev->mode_info; |
903 | int index = GetIndexIntoMasterTable(DATA, LVDS_Info); | 1048 | int index = GetIndexIntoMasterTable(DATA, LVDS_Info); |
904 | uint16_t data_offset; | 1049 | uint16_t data_offset, misc; |
905 | union lvds_info *lvds_info; | 1050 | union lvds_info *lvds_info; |
906 | uint8_t frev, crev; | 1051 | uint8_t frev, crev; |
907 | struct radeon_encoder_atom_dig *lvds = NULL; | 1052 | struct radeon_encoder_atom_dig *lvds = NULL; |
@@ -940,6 +1085,19 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct | |||
940 | lvds->panel_pwr_delay = | 1085 | lvds->panel_pwr_delay = |
941 | le16_to_cpu(lvds_info->info.usOffDelayInMs); | 1086 | le16_to_cpu(lvds_info->info.usOffDelayInMs); |
942 | lvds->lvds_misc = lvds_info->info.ucLVDS_Misc; | 1087 | lvds->lvds_misc = lvds_info->info.ucLVDS_Misc; |
1088 | |||
1089 | misc = le16_to_cpu(lvds_info->info.sLCDTiming.susModeMiscInfo.usAccess); | ||
1090 | if (misc & ATOM_VSYNC_POLARITY) | ||
1091 | lvds->native_mode.flags |= DRM_MODE_FLAG_NVSYNC; | ||
1092 | if (misc & ATOM_HSYNC_POLARITY) | ||
1093 | lvds->native_mode.flags |= DRM_MODE_FLAG_NHSYNC; | ||
1094 | if (misc & ATOM_COMPOSITESYNC) | ||
1095 | lvds->native_mode.flags |= DRM_MODE_FLAG_CSYNC; | ||
1096 | if (misc & ATOM_INTERLACE) | ||
1097 | lvds->native_mode.flags |= DRM_MODE_FLAG_INTERLACE; | ||
1098 | if (misc & ATOM_DOUBLE_CLOCK_MODE) | ||
1099 | lvds->native_mode.flags |= DRM_MODE_FLAG_DBLSCAN; | ||
1100 | |||
943 | /* set crtc values */ | 1101 | /* set crtc values */ |
944 | drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V); | 1102 | drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V); |
945 | 1103 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c index 10bd50a7db87..4ddfd4b5bc51 100644 --- a/drivers/gpu/drm/radeon/radeon_benchmark.c +++ b/drivers/gpu/drm/radeon/radeon_benchmark.c | |||
@@ -29,8 +29,8 @@ | |||
29 | void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize, | 29 | void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize, |
30 | unsigned sdomain, unsigned ddomain) | 30 | unsigned sdomain, unsigned ddomain) |
31 | { | 31 | { |
32 | struct radeon_object *dobj = NULL; | 32 | struct radeon_bo *dobj = NULL; |
33 | struct radeon_object *sobj = NULL; | 33 | struct radeon_bo *sobj = NULL; |
34 | struct radeon_fence *fence = NULL; | 34 | struct radeon_fence *fence = NULL; |
35 | uint64_t saddr, daddr; | 35 | uint64_t saddr, daddr; |
36 | unsigned long start_jiffies; | 36 | unsigned long start_jiffies; |
@@ -41,19 +41,27 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize, | |||
41 | 41 | ||
42 | size = bsize; | 42 | size = bsize; |
43 | n = 1024; | 43 | n = 1024; |
44 | r = radeon_object_create(rdev, NULL, size, true, sdomain, false, &sobj); | 44 | r = radeon_bo_create(rdev, NULL, size, true, sdomain, &sobj); |
45 | if (r) { | 45 | if (r) { |
46 | goto out_cleanup; | 46 | goto out_cleanup; |
47 | } | 47 | } |
48 | r = radeon_object_pin(sobj, sdomain, &saddr); | 48 | r = radeon_bo_reserve(sobj, false); |
49 | if (unlikely(r != 0)) | ||
50 | goto out_cleanup; | ||
51 | r = radeon_bo_pin(sobj, sdomain, &saddr); | ||
52 | radeon_bo_unreserve(sobj); | ||
49 | if (r) { | 53 | if (r) { |
50 | goto out_cleanup; | 54 | goto out_cleanup; |
51 | } | 55 | } |
52 | r = radeon_object_create(rdev, NULL, size, true, ddomain, false, &dobj); | 56 | r = radeon_bo_create(rdev, NULL, size, true, ddomain, &dobj); |
53 | if (r) { | 57 | if (r) { |
54 | goto out_cleanup; | 58 | goto out_cleanup; |
55 | } | 59 | } |
56 | r = radeon_object_pin(dobj, ddomain, &daddr); | 60 | r = radeon_bo_reserve(dobj, false); |
61 | if (unlikely(r != 0)) | ||
62 | goto out_cleanup; | ||
63 | r = radeon_bo_pin(dobj, ddomain, &daddr); | ||
64 | radeon_bo_unreserve(dobj); | ||
57 | if (r) { | 65 | if (r) { |
58 | goto out_cleanup; | 66 | goto out_cleanup; |
59 | } | 67 | } |
@@ -109,12 +117,20 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize, | |||
109 | } | 117 | } |
110 | out_cleanup: | 118 | out_cleanup: |
111 | if (sobj) { | 119 | if (sobj) { |
112 | radeon_object_unpin(sobj); | 120 | r = radeon_bo_reserve(sobj, false); |
113 | radeon_object_unref(&sobj); | 121 | if (likely(r == 0)) { |
122 | radeon_bo_unpin(sobj); | ||
123 | radeon_bo_unreserve(sobj); | ||
124 | } | ||
125 | radeon_bo_unref(&sobj); | ||
114 | } | 126 | } |
115 | if (dobj) { | 127 | if (dobj) { |
116 | radeon_object_unpin(dobj); | 128 | r = radeon_bo_reserve(dobj, false); |
117 | radeon_object_unref(&dobj); | 129 | if (likely(r == 0)) { |
130 | radeon_bo_unpin(dobj); | ||
131 | radeon_bo_unreserve(dobj); | ||
132 | } | ||
133 | radeon_bo_unref(&dobj); | ||
118 | } | 134 | } |
119 | if (fence) { | 135 | if (fence) { |
120 | radeon_fence_unref(&fence); | 136 | radeon_fence_unref(&fence); |
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c index a81354167621..b062109efbee 100644 --- a/drivers/gpu/drm/radeon/radeon_clocks.c +++ b/drivers/gpu/drm/radeon/radeon_clocks.c | |||
@@ -44,6 +44,10 @@ uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev) | |||
44 | 44 | ||
45 | ref_div = | 45 | ref_div = |
46 | RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) & RADEON_M_SPLL_REF_DIV_MASK; | 46 | RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) & RADEON_M_SPLL_REF_DIV_MASK; |
47 | |||
48 | if (ref_div == 0) | ||
49 | return 0; | ||
50 | |||
47 | sclk = fb_div / ref_div; | 51 | sclk = fb_div / ref_div; |
48 | 52 | ||
49 | post_div = RREG32_PLL(RADEON_SCLK_CNTL) & RADEON_SCLK_SRC_SEL_MASK; | 53 | post_div = RREG32_PLL(RADEON_SCLK_CNTL) & RADEON_SCLK_SRC_SEL_MASK; |
@@ -70,6 +74,10 @@ static uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev) | |||
70 | 74 | ||
71 | ref_div = | 75 | ref_div = |
72 | RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) & RADEON_M_SPLL_REF_DIV_MASK; | 76 | RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) & RADEON_M_SPLL_REF_DIV_MASK; |
77 | |||
78 | if (ref_div == 0) | ||
79 | return 0; | ||
80 | |||
73 | mclk = fb_div / ref_div; | 81 | mclk = fb_div / ref_div; |
74 | 82 | ||
75 | post_div = RREG32_PLL(RADEON_MCLK_CNTL) & 0x7; | 83 | post_div = RREG32_PLL(RADEON_MCLK_CNTL) & 0x7; |
@@ -98,8 +106,19 @@ void radeon_get_clock_info(struct drm_device *dev) | |||
98 | ret = radeon_combios_get_clock_info(dev); | 106 | ret = radeon_combios_get_clock_info(dev); |
99 | 107 | ||
100 | if (ret) { | 108 | if (ret) { |
101 | if (p1pll->reference_div < 2) | 109 | if (p1pll->reference_div < 2) { |
102 | p1pll->reference_div = 12; | 110 | if (!ASIC_IS_AVIVO(rdev)) { |
111 | u32 tmp = RREG32_PLL(RADEON_PPLL_REF_DIV); | ||
112 | if (ASIC_IS_R300(rdev)) | ||
113 | p1pll->reference_div = | ||
114 | (tmp & R300_PPLL_REF_DIV_ACC_MASK) >> R300_PPLL_REF_DIV_ACC_SHIFT; | ||
115 | else | ||
116 | p1pll->reference_div = tmp & RADEON_PPLL_REF_DIV_MASK; | ||
117 | if (p1pll->reference_div < 2) | ||
118 | p1pll->reference_div = 12; | ||
119 | } else | ||
120 | p1pll->reference_div = 12; | ||
121 | } | ||
103 | if (p2pll->reference_div < 2) | 122 | if (p2pll->reference_div < 2) |
104 | p2pll->reference_div = 12; | 123 | p2pll->reference_div = 12; |
105 | if (rdev->family < CHIP_RS600) { | 124 | if (rdev->family < CHIP_RS600) { |
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 5253cbf6db1f..c5021a3445de 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c | |||
@@ -50,7 +50,8 @@ radeon_add_legacy_connector(struct drm_device *dev, | |||
50 | uint32_t supported_device, | 50 | uint32_t supported_device, |
51 | int connector_type, | 51 | int connector_type, |
52 | struct radeon_i2c_bus_rec *i2c_bus, | 52 | struct radeon_i2c_bus_rec *i2c_bus, |
53 | uint16_t connector_object_id); | 53 | uint16_t connector_object_id, |
54 | struct radeon_hpd *hpd); | ||
54 | 55 | ||
55 | /* from radeon_legacy_encoder.c */ | 56 | /* from radeon_legacy_encoder.c */ |
56 | extern void | 57 | extern void |
@@ -442,38 +443,70 @@ static uint16_t combios_get_table_offset(struct drm_device *dev, | |||
442 | 443 | ||
443 | } | 444 | } |
444 | 445 | ||
445 | struct radeon_i2c_bus_rec combios_setup_i2c_bus(int ddc_line) | 446 | static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rdev, |
447 | int ddc_line) | ||
446 | { | 448 | { |
447 | struct radeon_i2c_bus_rec i2c; | 449 | struct radeon_i2c_bus_rec i2c; |
448 | 450 | ||
449 | i2c.mask_clk_mask = RADEON_GPIO_EN_1; | 451 | if (ddc_line == RADEON_GPIOPAD_MASK) { |
450 | i2c.mask_data_mask = RADEON_GPIO_EN_0; | 452 | i2c.mask_clk_reg = RADEON_GPIOPAD_MASK; |
451 | i2c.a_clk_mask = RADEON_GPIO_A_1; | 453 | i2c.mask_data_reg = RADEON_GPIOPAD_MASK; |
452 | i2c.a_data_mask = RADEON_GPIO_A_0; | 454 | i2c.a_clk_reg = RADEON_GPIOPAD_A; |
453 | i2c.put_clk_mask = RADEON_GPIO_EN_1; | 455 | i2c.a_data_reg = RADEON_GPIOPAD_A; |
454 | i2c.put_data_mask = RADEON_GPIO_EN_0; | 456 | i2c.en_clk_reg = RADEON_GPIOPAD_EN; |
455 | i2c.get_clk_mask = RADEON_GPIO_Y_1; | 457 | i2c.en_data_reg = RADEON_GPIOPAD_EN; |
456 | i2c.get_data_mask = RADEON_GPIO_Y_0; | 458 | i2c.y_clk_reg = RADEON_GPIOPAD_Y; |
457 | if ((ddc_line == RADEON_LCD_GPIO_MASK) || | 459 | i2c.y_data_reg = RADEON_GPIOPAD_Y; |
458 | (ddc_line == RADEON_MDGPIO_EN_REG)) { | 460 | } else if (ddc_line == RADEON_MDGPIO_MASK) { |
459 | i2c.mask_clk_reg = ddc_line; | 461 | i2c.mask_clk_reg = RADEON_MDGPIO_MASK; |
460 | i2c.mask_data_reg = ddc_line; | 462 | i2c.mask_data_reg = RADEON_MDGPIO_MASK; |
461 | i2c.a_clk_reg = ddc_line; | 463 | i2c.a_clk_reg = RADEON_MDGPIO_A; |
462 | i2c.a_data_reg = ddc_line; | 464 | i2c.a_data_reg = RADEON_MDGPIO_A; |
463 | i2c.put_clk_reg = ddc_line; | 465 | i2c.en_clk_reg = RADEON_MDGPIO_EN; |
464 | i2c.put_data_reg = ddc_line; | 466 | i2c.en_data_reg = RADEON_MDGPIO_EN; |
465 | i2c.get_clk_reg = ddc_line + 4; | 467 | i2c.y_clk_reg = RADEON_MDGPIO_Y; |
466 | i2c.get_data_reg = ddc_line + 4; | 468 | i2c.y_data_reg = RADEON_MDGPIO_Y; |
467 | } else { | 469 | } else { |
470 | i2c.mask_clk_mask = RADEON_GPIO_EN_1; | ||
471 | i2c.mask_data_mask = RADEON_GPIO_EN_0; | ||
472 | i2c.a_clk_mask = RADEON_GPIO_A_1; | ||
473 | i2c.a_data_mask = RADEON_GPIO_A_0; | ||
474 | i2c.en_clk_mask = RADEON_GPIO_EN_1; | ||
475 | i2c.en_data_mask = RADEON_GPIO_EN_0; | ||
476 | i2c.y_clk_mask = RADEON_GPIO_Y_1; | ||
477 | i2c.y_data_mask = RADEON_GPIO_Y_0; | ||
478 | |||
468 | i2c.mask_clk_reg = ddc_line; | 479 | i2c.mask_clk_reg = ddc_line; |
469 | i2c.mask_data_reg = ddc_line; | 480 | i2c.mask_data_reg = ddc_line; |
470 | i2c.a_clk_reg = ddc_line; | 481 | i2c.a_clk_reg = ddc_line; |
471 | i2c.a_data_reg = ddc_line; | 482 | i2c.a_data_reg = ddc_line; |
472 | i2c.put_clk_reg = ddc_line; | 483 | i2c.en_clk_reg = ddc_line; |
473 | i2c.put_data_reg = ddc_line; | 484 | i2c.en_data_reg = ddc_line; |
474 | i2c.get_clk_reg = ddc_line; | 485 | i2c.y_clk_reg = ddc_line; |
475 | i2c.get_data_reg = ddc_line; | 486 | i2c.y_data_reg = ddc_line; |
487 | } | ||
488 | |||
489 | if (rdev->family < CHIP_R200) | ||
490 | i2c.hw_capable = false; | ||
491 | else { | ||
492 | switch (ddc_line) { | ||
493 | case RADEON_GPIO_VGA_DDC: | ||
494 | case RADEON_GPIO_DVI_DDC: | ||
495 | i2c.hw_capable = true; | ||
496 | break; | ||
497 | case RADEON_GPIO_MONID: | ||
498 | /* hw i2c on RADEON_GPIO_MONID doesn't seem to work | ||
499 | * reliably on some pre-r4xx hardware; not sure why. | ||
500 | */ | ||
501 | i2c.hw_capable = false; | ||
502 | break; | ||
503 | default: | ||
504 | i2c.hw_capable = false; | ||
505 | break; | ||
506 | } | ||
476 | } | 507 | } |
508 | i2c.mm_i2c = false; | ||
509 | i2c.i2c_id = 0; | ||
477 | 510 | ||
478 | if (ddc_line) | 511 | if (ddc_line) |
479 | i2c.valid = true; | 512 | i2c.valid = true; |
@@ -495,7 +528,7 @@ bool radeon_combios_get_clock_info(struct drm_device *dev) | |||
495 | uint16_t sclk, mclk; | 528 | uint16_t sclk, mclk; |
496 | 529 | ||
497 | if (rdev->bios == NULL) | 530 | if (rdev->bios == NULL) |
498 | return NULL; | 531 | return false; |
499 | 532 | ||
500 | pll_info = combios_get_table_offset(dev, COMBIOS_PLL_INFO_TABLE); | 533 | pll_info = combios_get_table_offset(dev, COMBIOS_PLL_INFO_TABLE); |
501 | if (pll_info) { | 534 | if (pll_info) { |
@@ -993,8 +1026,8 @@ static const struct radeon_tmds_pll default_tmds_pll[CHIP_LAST][4] = { | |||
993 | {{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}}, /* CHIP_R420 */ | 1026 | {{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}}, /* CHIP_R420 */ |
994 | {{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}}, /* CHIP_R423 */ | 1027 | {{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}}, /* CHIP_R423 */ |
995 | {{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}}, /* CHIP_RV410 */ | 1028 | {{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}}, /* CHIP_RV410 */ |
996 | {{15000, 0xb0155}, {0xffffffff, 0xb01cb}, {0, 0}, {0, 0}}, /* CHIP_RS400 */ | 1029 | { {0, 0}, {0, 0}, {0, 0}, {0, 0} }, /* CHIP_RS400 */ |
997 | {{15000, 0xb0155}, {0xffffffff, 0xb01cb}, {0, 0}, {0, 0}}, /* CHIP_RS480 */ | 1030 | { {0, 0}, {0, 0}, {0, 0}, {0, 0} }, /* CHIP_RS480 */ |
998 | }; | 1031 | }; |
999 | 1032 | ||
1000 | bool radeon_legacy_get_tmds_info_from_table(struct radeon_encoder *encoder, | 1033 | bool radeon_legacy_get_tmds_info_from_table(struct radeon_encoder *encoder, |
@@ -1028,7 +1061,6 @@ bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder, | |||
1028 | tmds_info = combios_get_table_offset(dev, COMBIOS_DFP_INFO_TABLE); | 1061 | tmds_info = combios_get_table_offset(dev, COMBIOS_DFP_INFO_TABLE); |
1029 | 1062 | ||
1030 | if (tmds_info) { | 1063 | if (tmds_info) { |
1031 | |||
1032 | ver = RBIOS8(tmds_info); | 1064 | ver = RBIOS8(tmds_info); |
1033 | DRM_INFO("DFP table revision: %d\n", ver); | 1065 | DRM_INFO("DFP table revision: %d\n", ver); |
1034 | if (ver == 3) { | 1066 | if (ver == 3) { |
@@ -1063,51 +1095,139 @@ bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder, | |||
1063 | tmds->tmds_pll[i].value); | 1095 | tmds->tmds_pll[i].value); |
1064 | } | 1096 | } |
1065 | } | 1097 | } |
1066 | } else | 1098 | } else { |
1067 | DRM_INFO("No TMDS info found in BIOS\n"); | 1099 | DRM_INFO("No TMDS info found in BIOS\n"); |
1100 | return false; | ||
1101 | } | ||
1068 | return true; | 1102 | return true; |
1069 | } | 1103 | } |
1070 | 1104 | ||
1071 | struct radeon_encoder_int_tmds *radeon_combios_get_tmds_info(struct radeon_encoder *encoder) | 1105 | bool radeon_legacy_get_ext_tmds_info_from_table(struct radeon_encoder *encoder, |
1106 | struct radeon_encoder_ext_tmds *tmds) | ||
1072 | { | 1107 | { |
1073 | struct radeon_encoder_int_tmds *tmds = NULL; | 1108 | struct drm_device *dev = encoder->base.dev; |
1074 | bool ret; | 1109 | struct radeon_device *rdev = dev->dev_private; |
1110 | struct radeon_i2c_bus_rec i2c_bus; | ||
1075 | 1111 | ||
1076 | tmds = kzalloc(sizeof(struct radeon_encoder_int_tmds), GFP_KERNEL); | 1112 | /* default for macs */ |
1113 | i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID); | ||
1114 | tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO"); | ||
1077 | 1115 | ||
1078 | if (!tmds) | 1116 | /* XXX some macs have duallink chips */ |
1079 | return NULL; | 1117 | switch (rdev->mode_info.connector_table) { |
1080 | 1118 | case CT_POWERBOOK_EXTERNAL: | |
1081 | ret = radeon_legacy_get_tmds_info_from_combios(encoder, tmds); | 1119 | case CT_MINI_EXTERNAL: |
1082 | if (ret == false) | 1120 | default: |
1083 | radeon_legacy_get_tmds_info_from_table(encoder, tmds); | 1121 | tmds->dvo_chip = DVO_SIL164; |
1122 | tmds->slave_addr = 0x70 >> 1; /* 7 bit addressing */ | ||
1123 | break; | ||
1124 | } | ||
1084 | 1125 | ||
1085 | return tmds; | 1126 | return true; |
1086 | } | 1127 | } |
1087 | 1128 | ||
1088 | void radeon_combios_get_ext_tmds_info(struct radeon_encoder *encoder) | 1129 | bool radeon_legacy_get_ext_tmds_info_from_combios(struct radeon_encoder *encoder, |
1130 | struct radeon_encoder_ext_tmds *tmds) | ||
1089 | { | 1131 | { |
1090 | struct drm_device *dev = encoder->base.dev; | 1132 | struct drm_device *dev = encoder->base.dev; |
1091 | struct radeon_device *rdev = dev->dev_private; | 1133 | struct radeon_device *rdev = dev->dev_private; |
1092 | uint16_t ext_tmds_info; | 1134 | uint16_t offset; |
1093 | uint8_t ver; | 1135 | uint8_t ver, id, blocks, clk, data; |
1136 | int i; | ||
1137 | enum radeon_combios_ddc gpio; | ||
1138 | struct radeon_i2c_bus_rec i2c_bus; | ||
1094 | 1139 | ||
1095 | if (rdev->bios == NULL) | 1140 | if (rdev->bios == NULL) |
1096 | return; | 1141 | return false; |
1097 | 1142 | ||
1098 | ext_tmds_info = | 1143 | tmds->i2c_bus = NULL; |
1099 | combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE); | 1144 | if (rdev->flags & RADEON_IS_IGP) { |
1100 | if (ext_tmds_info) { | 1145 | offset = combios_get_table_offset(dev, COMBIOS_I2C_INFO_TABLE); |
1101 | ver = RBIOS8(ext_tmds_info); | 1146 | if (offset) { |
1102 | DRM_INFO("External TMDS Table revision: %d\n", ver); | 1147 | ver = RBIOS8(offset); |
1103 | // TODO | 1148 | DRM_INFO("GPIO Table revision: %d\n", ver); |
1149 | blocks = RBIOS8(offset + 2); | ||
1150 | for (i = 0; i < blocks; i++) { | ||
1151 | id = RBIOS8(offset + 3 + (i * 5) + 0); | ||
1152 | if (id == 136) { | ||
1153 | clk = RBIOS8(offset + 3 + (i * 5) + 3); | ||
1154 | data = RBIOS8(offset + 3 + (i * 5) + 4); | ||
1155 | i2c_bus.valid = true; | ||
1156 | i2c_bus.mask_clk_mask = (1 << clk); | ||
1157 | i2c_bus.mask_data_mask = (1 << data); | ||
1158 | i2c_bus.a_clk_mask = (1 << clk); | ||
1159 | i2c_bus.a_data_mask = (1 << data); | ||
1160 | i2c_bus.en_clk_mask = (1 << clk); | ||
1161 | i2c_bus.en_data_mask = (1 << data); | ||
1162 | i2c_bus.y_clk_mask = (1 << clk); | ||
1163 | i2c_bus.y_data_mask = (1 << data); | ||
1164 | i2c_bus.mask_clk_reg = RADEON_GPIOPAD_MASK; | ||
1165 | i2c_bus.mask_data_reg = RADEON_GPIOPAD_MASK; | ||
1166 | i2c_bus.a_clk_reg = RADEON_GPIOPAD_A; | ||
1167 | i2c_bus.a_data_reg = RADEON_GPIOPAD_A; | ||
1168 | i2c_bus.en_clk_reg = RADEON_GPIOPAD_EN; | ||
1169 | i2c_bus.en_data_reg = RADEON_GPIOPAD_EN; | ||
1170 | i2c_bus.y_clk_reg = RADEON_GPIOPAD_Y; | ||
1171 | i2c_bus.y_data_reg = RADEON_GPIOPAD_Y; | ||
1172 | tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO"); | ||
1173 | tmds->dvo_chip = DVO_SIL164; | ||
1174 | tmds->slave_addr = 0x70 >> 1; /* 7 bit addressing */ | ||
1175 | break; | ||
1176 | } | ||
1177 | } | ||
1178 | } | ||
1179 | } else { | ||
1180 | offset = combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE); | ||
1181 | if (offset) { | ||
1182 | ver = RBIOS8(offset); | ||
1183 | DRM_INFO("External TMDS Table revision: %d\n", ver); | ||
1184 | tmds->slave_addr = RBIOS8(offset + 4 + 2); | ||
1185 | tmds->slave_addr >>= 1; /* 7 bit addressing */ | ||
1186 | gpio = RBIOS8(offset + 4 + 3); | ||
1187 | switch (gpio) { | ||
1188 | case DDC_MONID: | ||
1189 | i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID); | ||
1190 | tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO"); | ||
1191 | break; | ||
1192 | case DDC_DVI: | ||
1193 | i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC); | ||
1194 | tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO"); | ||
1195 | break; | ||
1196 | case DDC_VGA: | ||
1197 | i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC); | ||
1198 | tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO"); | ||
1199 | break; | ||
1200 | case DDC_CRT2: | ||
1201 | /* R3xx+ chips don't have GPIO_CRT2_DDC gpio pad */ | ||
1202 | if (rdev->family >= CHIP_R300) | ||
1203 | i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID); | ||
1204 | else | ||
1205 | i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC); | ||
1206 | tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO"); | ||
1207 | break; | ||
1208 | case DDC_LCD: /* MM i2c */ | ||
1209 | DRM_ERROR("MM i2c requires hw i2c engine\n"); | ||
1210 | break; | ||
1211 | default: | ||
1212 | DRM_ERROR("Unsupported gpio %d\n", gpio); | ||
1213 | break; | ||
1214 | } | ||
1215 | } | ||
1104 | } | 1216 | } |
1217 | |||
1218 | if (!tmds->i2c_bus) { | ||
1219 | DRM_INFO("No valid Ext TMDS info found in BIOS\n"); | ||
1220 | return false; | ||
1221 | } | ||
1222 | |||
1223 | return true; | ||
1105 | } | 1224 | } |
1106 | 1225 | ||
1107 | bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | 1226 | bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) |
1108 | { | 1227 | { |
1109 | struct radeon_device *rdev = dev->dev_private; | 1228 | struct radeon_device *rdev = dev->dev_private; |
1110 | struct radeon_i2c_bus_rec ddc_i2c; | 1229 | struct radeon_i2c_bus_rec ddc_i2c; |
1230 | struct radeon_hpd hpd; | ||
1111 | 1231 | ||
1112 | rdev->mode_info.connector_table = radeon_connector_table; | 1232 | rdev->mode_info.connector_table = radeon_connector_table; |
1113 | if (rdev->mode_info.connector_table == CT_NONE) { | 1233 | if (rdev->mode_info.connector_table == CT_NONE) { |
@@ -1168,7 +1288,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1168 | /* these are the most common settings */ | 1288 | /* these are the most common settings */ |
1169 | if (rdev->flags & RADEON_SINGLE_CRTC) { | 1289 | if (rdev->flags & RADEON_SINGLE_CRTC) { |
1170 | /* VGA - primary dac */ | 1290 | /* VGA - primary dac */ |
1171 | ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC); | 1291 | ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC); |
1292 | hpd.hpd = RADEON_HPD_NONE; | ||
1172 | radeon_add_legacy_encoder(dev, | 1293 | radeon_add_legacy_encoder(dev, |
1173 | radeon_get_encoder_id(dev, | 1294 | radeon_get_encoder_id(dev, |
1174 | ATOM_DEVICE_CRT1_SUPPORT, | 1295 | ATOM_DEVICE_CRT1_SUPPORT, |
@@ -1178,10 +1299,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1178 | ATOM_DEVICE_CRT1_SUPPORT, | 1299 | ATOM_DEVICE_CRT1_SUPPORT, |
1179 | DRM_MODE_CONNECTOR_VGA, | 1300 | DRM_MODE_CONNECTOR_VGA, |
1180 | &ddc_i2c, | 1301 | &ddc_i2c, |
1181 | CONNECTOR_OBJECT_ID_VGA); | 1302 | CONNECTOR_OBJECT_ID_VGA, |
1303 | &hpd); | ||
1182 | } else if (rdev->flags & RADEON_IS_MOBILITY) { | 1304 | } else if (rdev->flags & RADEON_IS_MOBILITY) { |
1183 | /* LVDS */ | 1305 | /* LVDS */ |
1184 | ddc_i2c = combios_setup_i2c_bus(RADEON_LCD_GPIO_MASK); | 1306 | ddc_i2c = combios_setup_i2c_bus(rdev, 0); |
1307 | hpd.hpd = RADEON_HPD_NONE; | ||
1185 | radeon_add_legacy_encoder(dev, | 1308 | radeon_add_legacy_encoder(dev, |
1186 | radeon_get_encoder_id(dev, | 1309 | radeon_get_encoder_id(dev, |
1187 | ATOM_DEVICE_LCD1_SUPPORT, | 1310 | ATOM_DEVICE_LCD1_SUPPORT, |
@@ -1191,10 +1314,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1191 | ATOM_DEVICE_LCD1_SUPPORT, | 1314 | ATOM_DEVICE_LCD1_SUPPORT, |
1192 | DRM_MODE_CONNECTOR_LVDS, | 1315 | DRM_MODE_CONNECTOR_LVDS, |
1193 | &ddc_i2c, | 1316 | &ddc_i2c, |
1194 | CONNECTOR_OBJECT_ID_LVDS); | 1317 | CONNECTOR_OBJECT_ID_LVDS, |
1318 | &hpd); | ||
1195 | 1319 | ||
1196 | /* VGA - primary dac */ | 1320 | /* VGA - primary dac */ |
1197 | ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC); | 1321 | ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC); |
1322 | hpd.hpd = RADEON_HPD_NONE; | ||
1198 | radeon_add_legacy_encoder(dev, | 1323 | radeon_add_legacy_encoder(dev, |
1199 | radeon_get_encoder_id(dev, | 1324 | radeon_get_encoder_id(dev, |
1200 | ATOM_DEVICE_CRT1_SUPPORT, | 1325 | ATOM_DEVICE_CRT1_SUPPORT, |
@@ -1204,10 +1329,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1204 | ATOM_DEVICE_CRT1_SUPPORT, | 1329 | ATOM_DEVICE_CRT1_SUPPORT, |
1205 | DRM_MODE_CONNECTOR_VGA, | 1330 | DRM_MODE_CONNECTOR_VGA, |
1206 | &ddc_i2c, | 1331 | &ddc_i2c, |
1207 | CONNECTOR_OBJECT_ID_VGA); | 1332 | CONNECTOR_OBJECT_ID_VGA, |
1333 | &hpd); | ||
1208 | } else { | 1334 | } else { |
1209 | /* DVI-I - tv dac, int tmds */ | 1335 | /* DVI-I - tv dac, int tmds */ |
1210 | ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC); | 1336 | ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC); |
1337 | hpd.hpd = RADEON_HPD_1; | ||
1211 | radeon_add_legacy_encoder(dev, | 1338 | radeon_add_legacy_encoder(dev, |
1212 | radeon_get_encoder_id(dev, | 1339 | radeon_get_encoder_id(dev, |
1213 | ATOM_DEVICE_DFP1_SUPPORT, | 1340 | ATOM_DEVICE_DFP1_SUPPORT, |
@@ -1223,10 +1350,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1223 | ATOM_DEVICE_CRT2_SUPPORT, | 1350 | ATOM_DEVICE_CRT2_SUPPORT, |
1224 | DRM_MODE_CONNECTOR_DVII, | 1351 | DRM_MODE_CONNECTOR_DVII, |
1225 | &ddc_i2c, | 1352 | &ddc_i2c, |
1226 | CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I); | 1353 | CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I, |
1354 | &hpd); | ||
1227 | 1355 | ||
1228 | /* VGA - primary dac */ | 1356 | /* VGA - primary dac */ |
1229 | ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC); | 1357 | ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC); |
1358 | hpd.hpd = RADEON_HPD_NONE; | ||
1230 | radeon_add_legacy_encoder(dev, | 1359 | radeon_add_legacy_encoder(dev, |
1231 | radeon_get_encoder_id(dev, | 1360 | radeon_get_encoder_id(dev, |
1232 | ATOM_DEVICE_CRT1_SUPPORT, | 1361 | ATOM_DEVICE_CRT1_SUPPORT, |
@@ -1236,11 +1365,14 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1236 | ATOM_DEVICE_CRT1_SUPPORT, | 1365 | ATOM_DEVICE_CRT1_SUPPORT, |
1237 | DRM_MODE_CONNECTOR_VGA, | 1366 | DRM_MODE_CONNECTOR_VGA, |
1238 | &ddc_i2c, | 1367 | &ddc_i2c, |
1239 | CONNECTOR_OBJECT_ID_VGA); | 1368 | CONNECTOR_OBJECT_ID_VGA, |
1369 | &hpd); | ||
1240 | } | 1370 | } |
1241 | 1371 | ||
1242 | if (rdev->family != CHIP_R100 && rdev->family != CHIP_R200) { | 1372 | if (rdev->family != CHIP_R100 && rdev->family != CHIP_R200) { |
1243 | /* TV - tv dac */ | 1373 | /* TV - tv dac */ |
1374 | ddc_i2c.valid = false; | ||
1375 | hpd.hpd = RADEON_HPD_NONE; | ||
1244 | radeon_add_legacy_encoder(dev, | 1376 | radeon_add_legacy_encoder(dev, |
1245 | radeon_get_encoder_id(dev, | 1377 | radeon_get_encoder_id(dev, |
1246 | ATOM_DEVICE_TV1_SUPPORT, | 1378 | ATOM_DEVICE_TV1_SUPPORT, |
@@ -1250,14 +1382,16 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1250 | ATOM_DEVICE_TV1_SUPPORT, | 1382 | ATOM_DEVICE_TV1_SUPPORT, |
1251 | DRM_MODE_CONNECTOR_SVIDEO, | 1383 | DRM_MODE_CONNECTOR_SVIDEO, |
1252 | &ddc_i2c, | 1384 | &ddc_i2c, |
1253 | CONNECTOR_OBJECT_ID_SVIDEO); | 1385 | CONNECTOR_OBJECT_ID_SVIDEO, |
1386 | &hpd); | ||
1254 | } | 1387 | } |
1255 | break; | 1388 | break; |
1256 | case CT_IBOOK: | 1389 | case CT_IBOOK: |
1257 | DRM_INFO("Connector Table: %d (ibook)\n", | 1390 | DRM_INFO("Connector Table: %d (ibook)\n", |
1258 | rdev->mode_info.connector_table); | 1391 | rdev->mode_info.connector_table); |
1259 | /* LVDS */ | 1392 | /* LVDS */ |
1260 | ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC); | 1393 | ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC); |
1394 | hpd.hpd = RADEON_HPD_NONE; | ||
1261 | radeon_add_legacy_encoder(dev, | 1395 | radeon_add_legacy_encoder(dev, |
1262 | radeon_get_encoder_id(dev, | 1396 | radeon_get_encoder_id(dev, |
1263 | ATOM_DEVICE_LCD1_SUPPORT, | 1397 | ATOM_DEVICE_LCD1_SUPPORT, |
@@ -1265,9 +1399,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1265 | ATOM_DEVICE_LCD1_SUPPORT); | 1399 | ATOM_DEVICE_LCD1_SUPPORT); |
1266 | radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT, | 1400 | radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT, |
1267 | DRM_MODE_CONNECTOR_LVDS, &ddc_i2c, | 1401 | DRM_MODE_CONNECTOR_LVDS, &ddc_i2c, |
1268 | CONNECTOR_OBJECT_ID_LVDS); | 1402 | CONNECTOR_OBJECT_ID_LVDS, |
1403 | &hpd); | ||
1269 | /* VGA - TV DAC */ | 1404 | /* VGA - TV DAC */ |
1270 | ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC); | 1405 | ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC); |
1406 | hpd.hpd = RADEON_HPD_NONE; | ||
1271 | radeon_add_legacy_encoder(dev, | 1407 | radeon_add_legacy_encoder(dev, |
1272 | radeon_get_encoder_id(dev, | 1408 | radeon_get_encoder_id(dev, |
1273 | ATOM_DEVICE_CRT2_SUPPORT, | 1409 | ATOM_DEVICE_CRT2_SUPPORT, |
@@ -1275,8 +1411,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1275 | ATOM_DEVICE_CRT2_SUPPORT); | 1411 | ATOM_DEVICE_CRT2_SUPPORT); |
1276 | radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT, | 1412 | radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT, |
1277 | DRM_MODE_CONNECTOR_VGA, &ddc_i2c, | 1413 | DRM_MODE_CONNECTOR_VGA, &ddc_i2c, |
1278 | CONNECTOR_OBJECT_ID_VGA); | 1414 | CONNECTOR_OBJECT_ID_VGA, |
1415 | &hpd); | ||
1279 | /* TV - TV DAC */ | 1416 | /* TV - TV DAC */ |
1417 | ddc_i2c.valid = false; | ||
1418 | hpd.hpd = RADEON_HPD_NONE; | ||
1280 | radeon_add_legacy_encoder(dev, | 1419 | radeon_add_legacy_encoder(dev, |
1281 | radeon_get_encoder_id(dev, | 1420 | radeon_get_encoder_id(dev, |
1282 | ATOM_DEVICE_TV1_SUPPORT, | 1421 | ATOM_DEVICE_TV1_SUPPORT, |
@@ -1285,13 +1424,15 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1285 | radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT, | 1424 | radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT, |
1286 | DRM_MODE_CONNECTOR_SVIDEO, | 1425 | DRM_MODE_CONNECTOR_SVIDEO, |
1287 | &ddc_i2c, | 1426 | &ddc_i2c, |
1288 | CONNECTOR_OBJECT_ID_SVIDEO); | 1427 | CONNECTOR_OBJECT_ID_SVIDEO, |
1428 | &hpd); | ||
1289 | break; | 1429 | break; |
1290 | case CT_POWERBOOK_EXTERNAL: | 1430 | case CT_POWERBOOK_EXTERNAL: |
1291 | DRM_INFO("Connector Table: %d (powerbook external tmds)\n", | 1431 | DRM_INFO("Connector Table: %d (powerbook external tmds)\n", |
1292 | rdev->mode_info.connector_table); | 1432 | rdev->mode_info.connector_table); |
1293 | /* LVDS */ | 1433 | /* LVDS */ |
1294 | ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC); | 1434 | ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC); |
1435 | hpd.hpd = RADEON_HPD_NONE; | ||
1295 | radeon_add_legacy_encoder(dev, | 1436 | radeon_add_legacy_encoder(dev, |
1296 | radeon_get_encoder_id(dev, | 1437 | radeon_get_encoder_id(dev, |
1297 | ATOM_DEVICE_LCD1_SUPPORT, | 1438 | ATOM_DEVICE_LCD1_SUPPORT, |
@@ -1299,9 +1440,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1299 | ATOM_DEVICE_LCD1_SUPPORT); | 1440 | ATOM_DEVICE_LCD1_SUPPORT); |
1300 | radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT, | 1441 | radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT, |
1301 | DRM_MODE_CONNECTOR_LVDS, &ddc_i2c, | 1442 | DRM_MODE_CONNECTOR_LVDS, &ddc_i2c, |
1302 | CONNECTOR_OBJECT_ID_LVDS); | 1443 | CONNECTOR_OBJECT_ID_LVDS, |
1444 | &hpd); | ||
1303 | /* DVI-I - primary dac, ext tmds */ | 1445 | /* DVI-I - primary dac, ext tmds */ |
1304 | ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC); | 1446 | ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC); |
1447 | hpd.hpd = RADEON_HPD_2; /* ??? */ | ||
1305 | radeon_add_legacy_encoder(dev, | 1448 | radeon_add_legacy_encoder(dev, |
1306 | radeon_get_encoder_id(dev, | 1449 | radeon_get_encoder_id(dev, |
1307 | ATOM_DEVICE_DFP2_SUPPORT, | 1450 | ATOM_DEVICE_DFP2_SUPPORT, |
@@ -1317,8 +1460,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1317 | ATOM_DEVICE_DFP2_SUPPORT | | 1460 | ATOM_DEVICE_DFP2_SUPPORT | |
1318 | ATOM_DEVICE_CRT1_SUPPORT, | 1461 | ATOM_DEVICE_CRT1_SUPPORT, |
1319 | DRM_MODE_CONNECTOR_DVII, &ddc_i2c, | 1462 | DRM_MODE_CONNECTOR_DVII, &ddc_i2c, |
1320 | CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I); | 1463 | CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I, |
1464 | &hpd); | ||
1321 | /* TV - TV DAC */ | 1465 | /* TV - TV DAC */ |
1466 | ddc_i2c.valid = false; | ||
1467 | hpd.hpd = RADEON_HPD_NONE; | ||
1322 | radeon_add_legacy_encoder(dev, | 1468 | radeon_add_legacy_encoder(dev, |
1323 | radeon_get_encoder_id(dev, | 1469 | radeon_get_encoder_id(dev, |
1324 | ATOM_DEVICE_TV1_SUPPORT, | 1470 | ATOM_DEVICE_TV1_SUPPORT, |
@@ -1327,13 +1473,15 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1327 | radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT, | 1473 | radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT, |
1328 | DRM_MODE_CONNECTOR_SVIDEO, | 1474 | DRM_MODE_CONNECTOR_SVIDEO, |
1329 | &ddc_i2c, | 1475 | &ddc_i2c, |
1330 | CONNECTOR_OBJECT_ID_SVIDEO); | 1476 | CONNECTOR_OBJECT_ID_SVIDEO, |
1477 | &hpd); | ||
1331 | break; | 1478 | break; |
1332 | case CT_POWERBOOK_INTERNAL: | 1479 | case CT_POWERBOOK_INTERNAL: |
1333 | DRM_INFO("Connector Table: %d (powerbook internal tmds)\n", | 1480 | DRM_INFO("Connector Table: %d (powerbook internal tmds)\n", |
1334 | rdev->mode_info.connector_table); | 1481 | rdev->mode_info.connector_table); |
1335 | /* LVDS */ | 1482 | /* LVDS */ |
1336 | ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC); | 1483 | ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC); |
1484 | hpd.hpd = RADEON_HPD_NONE; | ||
1337 | radeon_add_legacy_encoder(dev, | 1485 | radeon_add_legacy_encoder(dev, |
1338 | radeon_get_encoder_id(dev, | 1486 | radeon_get_encoder_id(dev, |
1339 | ATOM_DEVICE_LCD1_SUPPORT, | 1487 | ATOM_DEVICE_LCD1_SUPPORT, |
@@ -1341,9 +1489,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1341 | ATOM_DEVICE_LCD1_SUPPORT); | 1489 | ATOM_DEVICE_LCD1_SUPPORT); |
1342 | radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT, | 1490 | radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT, |
1343 | DRM_MODE_CONNECTOR_LVDS, &ddc_i2c, | 1491 | DRM_MODE_CONNECTOR_LVDS, &ddc_i2c, |
1344 | CONNECTOR_OBJECT_ID_LVDS); | 1492 | CONNECTOR_OBJECT_ID_LVDS, |
1493 | &hpd); | ||
1345 | /* DVI-I - primary dac, int tmds */ | 1494 | /* DVI-I - primary dac, int tmds */ |
1346 | ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC); | 1495 | ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC); |
1496 | hpd.hpd = RADEON_HPD_1; /* ??? */ | ||
1347 | radeon_add_legacy_encoder(dev, | 1497 | radeon_add_legacy_encoder(dev, |
1348 | radeon_get_encoder_id(dev, | 1498 | radeon_get_encoder_id(dev, |
1349 | ATOM_DEVICE_DFP1_SUPPORT, | 1499 | ATOM_DEVICE_DFP1_SUPPORT, |
@@ -1358,8 +1508,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1358 | ATOM_DEVICE_DFP1_SUPPORT | | 1508 | ATOM_DEVICE_DFP1_SUPPORT | |
1359 | ATOM_DEVICE_CRT1_SUPPORT, | 1509 | ATOM_DEVICE_CRT1_SUPPORT, |
1360 | DRM_MODE_CONNECTOR_DVII, &ddc_i2c, | 1510 | DRM_MODE_CONNECTOR_DVII, &ddc_i2c, |
1361 | CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I); | 1511 | CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I, |
1512 | &hpd); | ||
1362 | /* TV - TV DAC */ | 1513 | /* TV - TV DAC */ |
1514 | ddc_i2c.valid = false; | ||
1515 | hpd.hpd = RADEON_HPD_NONE; | ||
1363 | radeon_add_legacy_encoder(dev, | 1516 | radeon_add_legacy_encoder(dev, |
1364 | radeon_get_encoder_id(dev, | 1517 | radeon_get_encoder_id(dev, |
1365 | ATOM_DEVICE_TV1_SUPPORT, | 1518 | ATOM_DEVICE_TV1_SUPPORT, |
@@ -1368,13 +1521,15 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1368 | radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT, | 1521 | radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT, |
1369 | DRM_MODE_CONNECTOR_SVIDEO, | 1522 | DRM_MODE_CONNECTOR_SVIDEO, |
1370 | &ddc_i2c, | 1523 | &ddc_i2c, |
1371 | CONNECTOR_OBJECT_ID_SVIDEO); | 1524 | CONNECTOR_OBJECT_ID_SVIDEO, |
1525 | &hpd); | ||
1372 | break; | 1526 | break; |
1373 | case CT_POWERBOOK_VGA: | 1527 | case CT_POWERBOOK_VGA: |
1374 | DRM_INFO("Connector Table: %d (powerbook vga)\n", | 1528 | DRM_INFO("Connector Table: %d (powerbook vga)\n", |
1375 | rdev->mode_info.connector_table); | 1529 | rdev->mode_info.connector_table); |
1376 | /* LVDS */ | 1530 | /* LVDS */ |
1377 | ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC); | 1531 | ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC); |
1532 | hpd.hpd = RADEON_HPD_NONE; | ||
1378 | radeon_add_legacy_encoder(dev, | 1533 | radeon_add_legacy_encoder(dev, |
1379 | radeon_get_encoder_id(dev, | 1534 | radeon_get_encoder_id(dev, |
1380 | ATOM_DEVICE_LCD1_SUPPORT, | 1535 | ATOM_DEVICE_LCD1_SUPPORT, |
@@ -1382,9 +1537,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1382 | ATOM_DEVICE_LCD1_SUPPORT); | 1537 | ATOM_DEVICE_LCD1_SUPPORT); |
1383 | radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT, | 1538 | radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT, |
1384 | DRM_MODE_CONNECTOR_LVDS, &ddc_i2c, | 1539 | DRM_MODE_CONNECTOR_LVDS, &ddc_i2c, |
1385 | CONNECTOR_OBJECT_ID_LVDS); | 1540 | CONNECTOR_OBJECT_ID_LVDS, |
1541 | &hpd); | ||
1386 | /* VGA - primary dac */ | 1542 | /* VGA - primary dac */ |
1387 | ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC); | 1543 | ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC); |
1544 | hpd.hpd = RADEON_HPD_NONE; | ||
1388 | radeon_add_legacy_encoder(dev, | 1545 | radeon_add_legacy_encoder(dev, |
1389 | radeon_get_encoder_id(dev, | 1546 | radeon_get_encoder_id(dev, |
1390 | ATOM_DEVICE_CRT1_SUPPORT, | 1547 | ATOM_DEVICE_CRT1_SUPPORT, |
@@ -1392,8 +1549,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1392 | ATOM_DEVICE_CRT1_SUPPORT); | 1549 | ATOM_DEVICE_CRT1_SUPPORT); |
1393 | radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT1_SUPPORT, | 1550 | radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT1_SUPPORT, |
1394 | DRM_MODE_CONNECTOR_VGA, &ddc_i2c, | 1551 | DRM_MODE_CONNECTOR_VGA, &ddc_i2c, |
1395 | CONNECTOR_OBJECT_ID_VGA); | 1552 | CONNECTOR_OBJECT_ID_VGA, |
1553 | &hpd); | ||
1396 | /* TV - TV DAC */ | 1554 | /* TV - TV DAC */ |
1555 | ddc_i2c.valid = false; | ||
1556 | hpd.hpd = RADEON_HPD_NONE; | ||
1397 | radeon_add_legacy_encoder(dev, | 1557 | radeon_add_legacy_encoder(dev, |
1398 | radeon_get_encoder_id(dev, | 1558 | radeon_get_encoder_id(dev, |
1399 | ATOM_DEVICE_TV1_SUPPORT, | 1559 | ATOM_DEVICE_TV1_SUPPORT, |
@@ -1402,13 +1562,15 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1402 | radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT, | 1562 | radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT, |
1403 | DRM_MODE_CONNECTOR_SVIDEO, | 1563 | DRM_MODE_CONNECTOR_SVIDEO, |
1404 | &ddc_i2c, | 1564 | &ddc_i2c, |
1405 | CONNECTOR_OBJECT_ID_SVIDEO); | 1565 | CONNECTOR_OBJECT_ID_SVIDEO, |
1566 | &hpd); | ||
1406 | break; | 1567 | break; |
1407 | case CT_MINI_EXTERNAL: | 1568 | case CT_MINI_EXTERNAL: |
1408 | DRM_INFO("Connector Table: %d (mini external tmds)\n", | 1569 | DRM_INFO("Connector Table: %d (mini external tmds)\n", |
1409 | rdev->mode_info.connector_table); | 1570 | rdev->mode_info.connector_table); |
1410 | /* DVI-I - tv dac, ext tmds */ | 1571 | /* DVI-I - tv dac, ext tmds */ |
1411 | ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_CRT2_DDC); | 1572 | ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC); |
1573 | hpd.hpd = RADEON_HPD_2; /* ??? */ | ||
1412 | radeon_add_legacy_encoder(dev, | 1574 | radeon_add_legacy_encoder(dev, |
1413 | radeon_get_encoder_id(dev, | 1575 | radeon_get_encoder_id(dev, |
1414 | ATOM_DEVICE_DFP2_SUPPORT, | 1576 | ATOM_DEVICE_DFP2_SUPPORT, |
@@ -1424,8 +1586,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1424 | ATOM_DEVICE_DFP2_SUPPORT | | 1586 | ATOM_DEVICE_DFP2_SUPPORT | |
1425 | ATOM_DEVICE_CRT2_SUPPORT, | 1587 | ATOM_DEVICE_CRT2_SUPPORT, |
1426 | DRM_MODE_CONNECTOR_DVII, &ddc_i2c, | 1588 | DRM_MODE_CONNECTOR_DVII, &ddc_i2c, |
1427 | CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I); | 1589 | CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I, |
1590 | &hpd); | ||
1428 | /* TV - TV DAC */ | 1591 | /* TV - TV DAC */ |
1592 | ddc_i2c.valid = false; | ||
1593 | hpd.hpd = RADEON_HPD_NONE; | ||
1429 | radeon_add_legacy_encoder(dev, | 1594 | radeon_add_legacy_encoder(dev, |
1430 | radeon_get_encoder_id(dev, | 1595 | radeon_get_encoder_id(dev, |
1431 | ATOM_DEVICE_TV1_SUPPORT, | 1596 | ATOM_DEVICE_TV1_SUPPORT, |
@@ -1434,13 +1599,15 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1434 | radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_TV1_SUPPORT, | 1599 | radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_TV1_SUPPORT, |
1435 | DRM_MODE_CONNECTOR_SVIDEO, | 1600 | DRM_MODE_CONNECTOR_SVIDEO, |
1436 | &ddc_i2c, | 1601 | &ddc_i2c, |
1437 | CONNECTOR_OBJECT_ID_SVIDEO); | 1602 | CONNECTOR_OBJECT_ID_SVIDEO, |
1603 | &hpd); | ||
1438 | break; | 1604 | break; |
1439 | case CT_MINI_INTERNAL: | 1605 | case CT_MINI_INTERNAL: |
1440 | DRM_INFO("Connector Table: %d (mini internal tmds)\n", | 1606 | DRM_INFO("Connector Table: %d (mini internal tmds)\n", |
1441 | rdev->mode_info.connector_table); | 1607 | rdev->mode_info.connector_table); |
1442 | /* DVI-I - tv dac, int tmds */ | 1608 | /* DVI-I - tv dac, int tmds */ |
1443 | ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_CRT2_DDC); | 1609 | ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC); |
1610 | hpd.hpd = RADEON_HPD_1; /* ??? */ | ||
1444 | radeon_add_legacy_encoder(dev, | 1611 | radeon_add_legacy_encoder(dev, |
1445 | radeon_get_encoder_id(dev, | 1612 | radeon_get_encoder_id(dev, |
1446 | ATOM_DEVICE_DFP1_SUPPORT, | 1613 | ATOM_DEVICE_DFP1_SUPPORT, |
@@ -1455,8 +1622,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1455 | ATOM_DEVICE_DFP1_SUPPORT | | 1622 | ATOM_DEVICE_DFP1_SUPPORT | |
1456 | ATOM_DEVICE_CRT2_SUPPORT, | 1623 | ATOM_DEVICE_CRT2_SUPPORT, |
1457 | DRM_MODE_CONNECTOR_DVII, &ddc_i2c, | 1624 | DRM_MODE_CONNECTOR_DVII, &ddc_i2c, |
1458 | CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I); | 1625 | CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I, |
1626 | &hpd); | ||
1459 | /* TV - TV DAC */ | 1627 | /* TV - TV DAC */ |
1628 | ddc_i2c.valid = false; | ||
1629 | hpd.hpd = RADEON_HPD_NONE; | ||
1460 | radeon_add_legacy_encoder(dev, | 1630 | radeon_add_legacy_encoder(dev, |
1461 | radeon_get_encoder_id(dev, | 1631 | radeon_get_encoder_id(dev, |
1462 | ATOM_DEVICE_TV1_SUPPORT, | 1632 | ATOM_DEVICE_TV1_SUPPORT, |
@@ -1465,13 +1635,15 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1465 | radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_TV1_SUPPORT, | 1635 | radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_TV1_SUPPORT, |
1466 | DRM_MODE_CONNECTOR_SVIDEO, | 1636 | DRM_MODE_CONNECTOR_SVIDEO, |
1467 | &ddc_i2c, | 1637 | &ddc_i2c, |
1468 | CONNECTOR_OBJECT_ID_SVIDEO); | 1638 | CONNECTOR_OBJECT_ID_SVIDEO, |
1639 | &hpd); | ||
1469 | break; | 1640 | break; |
1470 | case CT_IMAC_G5_ISIGHT: | 1641 | case CT_IMAC_G5_ISIGHT: |
1471 | DRM_INFO("Connector Table: %d (imac g5 isight)\n", | 1642 | DRM_INFO("Connector Table: %d (imac g5 isight)\n", |
1472 | rdev->mode_info.connector_table); | 1643 | rdev->mode_info.connector_table); |
1473 | /* DVI-D - int tmds */ | 1644 | /* DVI-D - int tmds */ |
1474 | ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_MONID); | 1645 | ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID); |
1646 | hpd.hpd = RADEON_HPD_1; /* ??? */ | ||
1475 | radeon_add_legacy_encoder(dev, | 1647 | radeon_add_legacy_encoder(dev, |
1476 | radeon_get_encoder_id(dev, | 1648 | radeon_get_encoder_id(dev, |
1477 | ATOM_DEVICE_DFP1_SUPPORT, | 1649 | ATOM_DEVICE_DFP1_SUPPORT, |
@@ -1479,9 +1651,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1479 | ATOM_DEVICE_DFP1_SUPPORT); | 1651 | ATOM_DEVICE_DFP1_SUPPORT); |
1480 | radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_DFP1_SUPPORT, | 1652 | radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_DFP1_SUPPORT, |
1481 | DRM_MODE_CONNECTOR_DVID, &ddc_i2c, | 1653 | DRM_MODE_CONNECTOR_DVID, &ddc_i2c, |
1482 | CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D); | 1654 | CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D, |
1655 | &hpd); | ||
1483 | /* VGA - tv dac */ | 1656 | /* VGA - tv dac */ |
1484 | ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC); | 1657 | ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC); |
1658 | hpd.hpd = RADEON_HPD_NONE; | ||
1485 | radeon_add_legacy_encoder(dev, | 1659 | radeon_add_legacy_encoder(dev, |
1486 | radeon_get_encoder_id(dev, | 1660 | radeon_get_encoder_id(dev, |
1487 | ATOM_DEVICE_CRT2_SUPPORT, | 1661 | ATOM_DEVICE_CRT2_SUPPORT, |
@@ -1489,8 +1663,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1489 | ATOM_DEVICE_CRT2_SUPPORT); | 1663 | ATOM_DEVICE_CRT2_SUPPORT); |
1490 | radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT, | 1664 | radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT, |
1491 | DRM_MODE_CONNECTOR_VGA, &ddc_i2c, | 1665 | DRM_MODE_CONNECTOR_VGA, &ddc_i2c, |
1492 | CONNECTOR_OBJECT_ID_VGA); | 1666 | CONNECTOR_OBJECT_ID_VGA, |
1667 | &hpd); | ||
1493 | /* TV - TV DAC */ | 1668 | /* TV - TV DAC */ |
1669 | ddc_i2c.valid = false; | ||
1670 | hpd.hpd = RADEON_HPD_NONE; | ||
1494 | radeon_add_legacy_encoder(dev, | 1671 | radeon_add_legacy_encoder(dev, |
1495 | radeon_get_encoder_id(dev, | 1672 | radeon_get_encoder_id(dev, |
1496 | ATOM_DEVICE_TV1_SUPPORT, | 1673 | ATOM_DEVICE_TV1_SUPPORT, |
@@ -1499,13 +1676,15 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1499 | radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT, | 1676 | radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT, |
1500 | DRM_MODE_CONNECTOR_SVIDEO, | 1677 | DRM_MODE_CONNECTOR_SVIDEO, |
1501 | &ddc_i2c, | 1678 | &ddc_i2c, |
1502 | CONNECTOR_OBJECT_ID_SVIDEO); | 1679 | CONNECTOR_OBJECT_ID_SVIDEO, |
1680 | &hpd); | ||
1503 | break; | 1681 | break; |
1504 | case CT_EMAC: | 1682 | case CT_EMAC: |
1505 | DRM_INFO("Connector Table: %d (emac)\n", | 1683 | DRM_INFO("Connector Table: %d (emac)\n", |
1506 | rdev->mode_info.connector_table); | 1684 | rdev->mode_info.connector_table); |
1507 | /* VGA - primary dac */ | 1685 | /* VGA - primary dac */ |
1508 | ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC); | 1686 | ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC); |
1687 | hpd.hpd = RADEON_HPD_NONE; | ||
1509 | radeon_add_legacy_encoder(dev, | 1688 | radeon_add_legacy_encoder(dev, |
1510 | radeon_get_encoder_id(dev, | 1689 | radeon_get_encoder_id(dev, |
1511 | ATOM_DEVICE_CRT1_SUPPORT, | 1690 | ATOM_DEVICE_CRT1_SUPPORT, |
@@ -1513,9 +1692,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1513 | ATOM_DEVICE_CRT1_SUPPORT); | 1692 | ATOM_DEVICE_CRT1_SUPPORT); |
1514 | radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_CRT1_SUPPORT, | 1693 | radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_CRT1_SUPPORT, |
1515 | DRM_MODE_CONNECTOR_VGA, &ddc_i2c, | 1694 | DRM_MODE_CONNECTOR_VGA, &ddc_i2c, |
1516 | CONNECTOR_OBJECT_ID_VGA); | 1695 | CONNECTOR_OBJECT_ID_VGA, |
1696 | &hpd); | ||
1517 | /* VGA - tv dac */ | 1697 | /* VGA - tv dac */ |
1518 | ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_CRT2_DDC); | 1698 | ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC); |
1699 | hpd.hpd = RADEON_HPD_NONE; | ||
1519 | radeon_add_legacy_encoder(dev, | 1700 | radeon_add_legacy_encoder(dev, |
1520 | radeon_get_encoder_id(dev, | 1701 | radeon_get_encoder_id(dev, |
1521 | ATOM_DEVICE_CRT2_SUPPORT, | 1702 | ATOM_DEVICE_CRT2_SUPPORT, |
@@ -1523,8 +1704,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1523 | ATOM_DEVICE_CRT2_SUPPORT); | 1704 | ATOM_DEVICE_CRT2_SUPPORT); |
1524 | radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT, | 1705 | radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT, |
1525 | DRM_MODE_CONNECTOR_VGA, &ddc_i2c, | 1706 | DRM_MODE_CONNECTOR_VGA, &ddc_i2c, |
1526 | CONNECTOR_OBJECT_ID_VGA); | 1707 | CONNECTOR_OBJECT_ID_VGA, |
1708 | &hpd); | ||
1527 | /* TV - TV DAC */ | 1709 | /* TV - TV DAC */ |
1710 | ddc_i2c.valid = false; | ||
1711 | hpd.hpd = RADEON_HPD_NONE; | ||
1528 | radeon_add_legacy_encoder(dev, | 1712 | radeon_add_legacy_encoder(dev, |
1529 | radeon_get_encoder_id(dev, | 1713 | radeon_get_encoder_id(dev, |
1530 | ATOM_DEVICE_TV1_SUPPORT, | 1714 | ATOM_DEVICE_TV1_SUPPORT, |
@@ -1533,7 +1717,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) | |||
1533 | radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT, | 1717 | radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT, |
1534 | DRM_MODE_CONNECTOR_SVIDEO, | 1718 | DRM_MODE_CONNECTOR_SVIDEO, |
1535 | &ddc_i2c, | 1719 | &ddc_i2c, |
1536 | CONNECTOR_OBJECT_ID_SVIDEO); | 1720 | CONNECTOR_OBJECT_ID_SVIDEO, |
1721 | &hpd); | ||
1537 | break; | 1722 | break; |
1538 | default: | 1723 | default: |
1539 | DRM_INFO("Connector table: %d (invalid)\n", | 1724 | DRM_INFO("Connector table: %d (invalid)\n", |
@@ -1550,7 +1735,8 @@ static bool radeon_apply_legacy_quirks(struct drm_device *dev, | |||
1550 | int bios_index, | 1735 | int bios_index, |
1551 | enum radeon_combios_connector | 1736 | enum radeon_combios_connector |
1552 | *legacy_connector, | 1737 | *legacy_connector, |
1553 | struct radeon_i2c_bus_rec *ddc_i2c) | 1738 | struct radeon_i2c_bus_rec *ddc_i2c, |
1739 | struct radeon_hpd *hpd) | ||
1554 | { | 1740 | { |
1555 | struct radeon_device *rdev = dev->dev_private; | 1741 | struct radeon_device *rdev = dev->dev_private; |
1556 | 1742 | ||
@@ -1558,29 +1744,26 @@ static bool radeon_apply_legacy_quirks(struct drm_device *dev, | |||
1558 | if ((rdev->family == CHIP_RS400 || | 1744 | if ((rdev->family == CHIP_RS400 || |
1559 | rdev->family == CHIP_RS480) && | 1745 | rdev->family == CHIP_RS480) && |
1560 | ddc_i2c->mask_clk_reg == RADEON_GPIO_CRT2_DDC) | 1746 | ddc_i2c->mask_clk_reg == RADEON_GPIO_CRT2_DDC) |
1561 | *ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_MONID); | 1747 | *ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID); |
1562 | else if ((rdev->family == CHIP_RS400 || | 1748 | else if ((rdev->family == CHIP_RS400 || |
1563 | rdev->family == CHIP_RS480) && | 1749 | rdev->family == CHIP_RS480) && |
1564 | ddc_i2c->mask_clk_reg == RADEON_GPIO_MONID) { | 1750 | ddc_i2c->mask_clk_reg == RADEON_GPIO_MONID) { |
1565 | ddc_i2c->valid = true; | 1751 | *ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIOPAD_MASK); |
1566 | ddc_i2c->mask_clk_mask = (0x20 << 8); | 1752 | ddc_i2c->mask_clk_mask = (0x20 << 8); |
1567 | ddc_i2c->mask_data_mask = 0x80; | 1753 | ddc_i2c->mask_data_mask = 0x80; |
1568 | ddc_i2c->a_clk_mask = (0x20 << 8); | 1754 | ddc_i2c->a_clk_mask = (0x20 << 8); |
1569 | ddc_i2c->a_data_mask = 0x80; | 1755 | ddc_i2c->a_data_mask = 0x80; |
1570 | ddc_i2c->put_clk_mask = (0x20 << 8); | 1756 | ddc_i2c->en_clk_mask = (0x20 << 8); |
1571 | ddc_i2c->put_data_mask = 0x80; | 1757 | ddc_i2c->en_data_mask = 0x80; |
1572 | ddc_i2c->get_clk_mask = (0x20 << 8); | 1758 | ddc_i2c->y_clk_mask = (0x20 << 8); |
1573 | ddc_i2c->get_data_mask = 0x80; | 1759 | ddc_i2c->y_data_mask = 0x80; |
1574 | ddc_i2c->mask_clk_reg = RADEON_GPIOPAD_MASK; | ||
1575 | ddc_i2c->mask_data_reg = RADEON_GPIOPAD_MASK; | ||
1576 | ddc_i2c->a_clk_reg = RADEON_GPIOPAD_A; | ||
1577 | ddc_i2c->a_data_reg = RADEON_GPIOPAD_A; | ||
1578 | ddc_i2c->put_clk_reg = RADEON_GPIOPAD_EN; | ||
1579 | ddc_i2c->put_data_reg = RADEON_GPIOPAD_EN; | ||
1580 | ddc_i2c->get_clk_reg = RADEON_LCD_GPIO_Y_REG; | ||
1581 | ddc_i2c->get_data_reg = RADEON_LCD_GPIO_Y_REG; | ||
1582 | } | 1760 | } |
1583 | 1761 | ||
1762 | /* R3xx+ chips don't have GPIO_CRT2_DDC gpio pad */ | ||
1763 | if ((rdev->family >= CHIP_R300) && | ||
1764 | ddc_i2c->mask_clk_reg == RADEON_GPIO_CRT2_DDC) | ||
1765 | *ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC); | ||
1766 | |||
1584 | /* Certain IBM chipset RN50s have a BIOS reporting two VGAs, | 1767 | /* Certain IBM chipset RN50s have a BIOS reporting two VGAs, |
1585 | one with VGA DDC and one with CRT2 DDC. - kill the CRT2 DDC one */ | 1768 | one with VGA DDC and one with CRT2 DDC. - kill the CRT2 DDC one */ |
1586 | if (dev->pdev->device == 0x515e && | 1769 | if (dev->pdev->device == 0x515e && |
@@ -1624,6 +1807,12 @@ static bool radeon_apply_legacy_tv_quirks(struct drm_device *dev) | |||
1624 | dev->pdev->subsystem_device == 0x280a) | 1807 | dev->pdev->subsystem_device == 0x280a) |
1625 | return false; | 1808 | return false; |
1626 | 1809 | ||
1810 | /* MSI S270 has non-existent TV port */ | ||
1811 | if (dev->pdev->device == 0x5955 && | ||
1812 | dev->pdev->subsystem_vendor == 0x1462 && | ||
1813 | dev->pdev->subsystem_device == 0x0131) | ||
1814 | return false; | ||
1815 | |||
1627 | return true; | 1816 | return true; |
1628 | } | 1817 | } |
1629 | 1818 | ||
@@ -1671,6 +1860,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) | |||
1671 | enum radeon_combios_connector connector; | 1860 | enum radeon_combios_connector connector; |
1672 | int i = 0; | 1861 | int i = 0; |
1673 | struct radeon_i2c_bus_rec ddc_i2c; | 1862 | struct radeon_i2c_bus_rec ddc_i2c; |
1863 | struct radeon_hpd hpd; | ||
1674 | 1864 | ||
1675 | if (rdev->bios == NULL) | 1865 | if (rdev->bios == NULL) |
1676 | return false; | 1866 | return false; |
@@ -1691,26 +1881,40 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) | |||
1691 | switch (ddc_type) { | 1881 | switch (ddc_type) { |
1692 | case DDC_MONID: | 1882 | case DDC_MONID: |
1693 | ddc_i2c = | 1883 | ddc_i2c = |
1694 | combios_setup_i2c_bus(RADEON_GPIO_MONID); | 1884 | combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID); |
1695 | break; | 1885 | break; |
1696 | case DDC_DVI: | 1886 | case DDC_DVI: |
1697 | ddc_i2c = | 1887 | ddc_i2c = |
1698 | combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC); | 1888 | combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC); |
1699 | break; | 1889 | break; |
1700 | case DDC_VGA: | 1890 | case DDC_VGA: |
1701 | ddc_i2c = | 1891 | ddc_i2c = |
1702 | combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC); | 1892 | combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC); |
1703 | break; | 1893 | break; |
1704 | case DDC_CRT2: | 1894 | case DDC_CRT2: |
1705 | ddc_i2c = | 1895 | ddc_i2c = |
1706 | combios_setup_i2c_bus(RADEON_GPIO_CRT2_DDC); | 1896 | combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC); |
1707 | break; | 1897 | break; |
1708 | default: | 1898 | default: |
1709 | break; | 1899 | break; |
1710 | } | 1900 | } |
1711 | 1901 | ||
1902 | switch (connector) { | ||
1903 | case CONNECTOR_PROPRIETARY_LEGACY: | ||
1904 | case CONNECTOR_DVI_I_LEGACY: | ||
1905 | case CONNECTOR_DVI_D_LEGACY: | ||
1906 | if ((tmp >> 4) & 0x1) | ||
1907 | hpd.hpd = RADEON_HPD_2; | ||
1908 | else | ||
1909 | hpd.hpd = RADEON_HPD_1; | ||
1910 | break; | ||
1911 | default: | ||
1912 | hpd.hpd = RADEON_HPD_NONE; | ||
1913 | break; | ||
1914 | } | ||
1915 | |||
1712 | if (!radeon_apply_legacy_quirks(dev, i, &connector, | 1916 | if (!radeon_apply_legacy_quirks(dev, i, &connector, |
1713 | &ddc_i2c)) | 1917 | &ddc_i2c, &hpd)) |
1714 | continue; | 1918 | continue; |
1715 | 1919 | ||
1716 | switch (connector) { | 1920 | switch (connector) { |
@@ -1727,7 +1931,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) | |||
1727 | legacy_connector_convert | 1931 | legacy_connector_convert |
1728 | [connector], | 1932 | [connector], |
1729 | &ddc_i2c, | 1933 | &ddc_i2c, |
1730 | CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D); | 1934 | CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D, |
1935 | &hpd); | ||
1731 | break; | 1936 | break; |
1732 | case CONNECTOR_CRT_LEGACY: | 1937 | case CONNECTOR_CRT_LEGACY: |
1733 | if (tmp & 0x1) { | 1938 | if (tmp & 0x1) { |
@@ -1753,7 +1958,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) | |||
1753 | legacy_connector_convert | 1958 | legacy_connector_convert |
1754 | [connector], | 1959 | [connector], |
1755 | &ddc_i2c, | 1960 | &ddc_i2c, |
1756 | CONNECTOR_OBJECT_ID_VGA); | 1961 | CONNECTOR_OBJECT_ID_VGA, |
1962 | &hpd); | ||
1757 | break; | 1963 | break; |
1758 | case CONNECTOR_DVI_I_LEGACY: | 1964 | case CONNECTOR_DVI_I_LEGACY: |
1759 | devices = 0; | 1965 | devices = 0; |
@@ -1799,7 +2005,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) | |||
1799 | legacy_connector_convert | 2005 | legacy_connector_convert |
1800 | [connector], | 2006 | [connector], |
1801 | &ddc_i2c, | 2007 | &ddc_i2c, |
1802 | connector_object_id); | 2008 | connector_object_id, |
2009 | &hpd); | ||
1803 | break; | 2010 | break; |
1804 | case CONNECTOR_DVI_D_LEGACY: | 2011 | case CONNECTOR_DVI_D_LEGACY: |
1805 | if ((tmp >> 4) & 0x1) { | 2012 | if ((tmp >> 4) & 0x1) { |
@@ -1817,7 +2024,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) | |||
1817 | legacy_connector_convert | 2024 | legacy_connector_convert |
1818 | [connector], | 2025 | [connector], |
1819 | &ddc_i2c, | 2026 | &ddc_i2c, |
1820 | connector_object_id); | 2027 | connector_object_id, |
2028 | &hpd); | ||
1821 | break; | 2029 | break; |
1822 | case CONNECTOR_CTV_LEGACY: | 2030 | case CONNECTOR_CTV_LEGACY: |
1823 | case CONNECTOR_STV_LEGACY: | 2031 | case CONNECTOR_STV_LEGACY: |
@@ -1832,7 +2040,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) | |||
1832 | legacy_connector_convert | 2040 | legacy_connector_convert |
1833 | [connector], | 2041 | [connector], |
1834 | &ddc_i2c, | 2042 | &ddc_i2c, |
1835 | CONNECTOR_OBJECT_ID_SVIDEO); | 2043 | CONNECTOR_OBJECT_ID_SVIDEO, |
2044 | &hpd); | ||
1836 | break; | 2045 | break; |
1837 | default: | 2046 | default: |
1838 | DRM_ERROR("Unknown connector type: %d\n", | 2047 | DRM_ERROR("Unknown connector type: %d\n", |
@@ -1858,14 +2067,16 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) | |||
1858 | 0), | 2067 | 0), |
1859 | ATOM_DEVICE_DFP1_SUPPORT); | 2068 | ATOM_DEVICE_DFP1_SUPPORT); |
1860 | 2069 | ||
1861 | ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC); | 2070 | ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC); |
2071 | hpd.hpd = RADEON_HPD_NONE; | ||
1862 | radeon_add_legacy_connector(dev, | 2072 | radeon_add_legacy_connector(dev, |
1863 | 0, | 2073 | 0, |
1864 | ATOM_DEVICE_CRT1_SUPPORT | | 2074 | ATOM_DEVICE_CRT1_SUPPORT | |
1865 | ATOM_DEVICE_DFP1_SUPPORT, | 2075 | ATOM_DEVICE_DFP1_SUPPORT, |
1866 | DRM_MODE_CONNECTOR_DVII, | 2076 | DRM_MODE_CONNECTOR_DVII, |
1867 | &ddc_i2c, | 2077 | &ddc_i2c, |
1868 | CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I); | 2078 | CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I, |
2079 | &hpd); | ||
1869 | } else { | 2080 | } else { |
1870 | uint16_t crt_info = | 2081 | uint16_t crt_info = |
1871 | combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE); | 2082 | combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE); |
@@ -1876,13 +2087,15 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) | |||
1876 | ATOM_DEVICE_CRT1_SUPPORT, | 2087 | ATOM_DEVICE_CRT1_SUPPORT, |
1877 | 1), | 2088 | 1), |
1878 | ATOM_DEVICE_CRT1_SUPPORT); | 2089 | ATOM_DEVICE_CRT1_SUPPORT); |
1879 | ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC); | 2090 | ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC); |
2091 | hpd.hpd = RADEON_HPD_NONE; | ||
1880 | radeon_add_legacy_connector(dev, | 2092 | radeon_add_legacy_connector(dev, |
1881 | 0, | 2093 | 0, |
1882 | ATOM_DEVICE_CRT1_SUPPORT, | 2094 | ATOM_DEVICE_CRT1_SUPPORT, |
1883 | DRM_MODE_CONNECTOR_VGA, | 2095 | DRM_MODE_CONNECTOR_VGA, |
1884 | &ddc_i2c, | 2096 | &ddc_i2c, |
1885 | CONNECTOR_OBJECT_ID_VGA); | 2097 | CONNECTOR_OBJECT_ID_VGA, |
2098 | &hpd); | ||
1886 | } else { | 2099 | } else { |
1887 | DRM_DEBUG("No connector info found\n"); | 2100 | DRM_DEBUG("No connector info found\n"); |
1888 | return false; | 2101 | return false; |
@@ -1910,27 +2123,27 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) | |||
1910 | case DDC_MONID: | 2123 | case DDC_MONID: |
1911 | ddc_i2c = | 2124 | ddc_i2c = |
1912 | combios_setup_i2c_bus | 2125 | combios_setup_i2c_bus |
1913 | (RADEON_GPIO_MONID); | 2126 | (rdev, RADEON_GPIO_MONID); |
1914 | break; | 2127 | break; |
1915 | case DDC_DVI: | 2128 | case DDC_DVI: |
1916 | ddc_i2c = | 2129 | ddc_i2c = |
1917 | combios_setup_i2c_bus | 2130 | combios_setup_i2c_bus |
1918 | (RADEON_GPIO_DVI_DDC); | 2131 | (rdev, RADEON_GPIO_DVI_DDC); |
1919 | break; | 2132 | break; |
1920 | case DDC_VGA: | 2133 | case DDC_VGA: |
1921 | ddc_i2c = | 2134 | ddc_i2c = |
1922 | combios_setup_i2c_bus | 2135 | combios_setup_i2c_bus |
1923 | (RADEON_GPIO_VGA_DDC); | 2136 | (rdev, RADEON_GPIO_VGA_DDC); |
1924 | break; | 2137 | break; |
1925 | case DDC_CRT2: | 2138 | case DDC_CRT2: |
1926 | ddc_i2c = | 2139 | ddc_i2c = |
1927 | combios_setup_i2c_bus | 2140 | combios_setup_i2c_bus |
1928 | (RADEON_GPIO_CRT2_DDC); | 2141 | (rdev, RADEON_GPIO_CRT2_DDC); |
1929 | break; | 2142 | break; |
1930 | case DDC_LCD: | 2143 | case DDC_LCD: |
1931 | ddc_i2c = | 2144 | ddc_i2c = |
1932 | combios_setup_i2c_bus | 2145 | combios_setup_i2c_bus |
1933 | (RADEON_LCD_GPIO_MASK); | 2146 | (rdev, RADEON_GPIOPAD_MASK); |
1934 | ddc_i2c.mask_clk_mask = | 2147 | ddc_i2c.mask_clk_mask = |
1935 | RBIOS32(lcd_ddc_info + 3); | 2148 | RBIOS32(lcd_ddc_info + 3); |
1936 | ddc_i2c.mask_data_mask = | 2149 | ddc_i2c.mask_data_mask = |
@@ -1939,19 +2152,19 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) | |||
1939 | RBIOS32(lcd_ddc_info + 3); | 2152 | RBIOS32(lcd_ddc_info + 3); |
1940 | ddc_i2c.a_data_mask = | 2153 | ddc_i2c.a_data_mask = |
1941 | RBIOS32(lcd_ddc_info + 7); | 2154 | RBIOS32(lcd_ddc_info + 7); |
1942 | ddc_i2c.put_clk_mask = | 2155 | ddc_i2c.en_clk_mask = |
1943 | RBIOS32(lcd_ddc_info + 3); | 2156 | RBIOS32(lcd_ddc_info + 3); |
1944 | ddc_i2c.put_data_mask = | 2157 | ddc_i2c.en_data_mask = |
1945 | RBIOS32(lcd_ddc_info + 7); | 2158 | RBIOS32(lcd_ddc_info + 7); |
1946 | ddc_i2c.get_clk_mask = | 2159 | ddc_i2c.y_clk_mask = |
1947 | RBIOS32(lcd_ddc_info + 3); | 2160 | RBIOS32(lcd_ddc_info + 3); |
1948 | ddc_i2c.get_data_mask = | 2161 | ddc_i2c.y_data_mask = |
1949 | RBIOS32(lcd_ddc_info + 7); | 2162 | RBIOS32(lcd_ddc_info + 7); |
1950 | break; | 2163 | break; |
1951 | case DDC_GPIO: | 2164 | case DDC_GPIO: |
1952 | ddc_i2c = | 2165 | ddc_i2c = |
1953 | combios_setup_i2c_bus | 2166 | combios_setup_i2c_bus |
1954 | (RADEON_MDGPIO_EN_REG); | 2167 | (rdev, RADEON_MDGPIO_MASK); |
1955 | ddc_i2c.mask_clk_mask = | 2168 | ddc_i2c.mask_clk_mask = |
1956 | RBIOS32(lcd_ddc_info + 3); | 2169 | RBIOS32(lcd_ddc_info + 3); |
1957 | ddc_i2c.mask_data_mask = | 2170 | ddc_i2c.mask_data_mask = |
@@ -1960,13 +2173,13 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) | |||
1960 | RBIOS32(lcd_ddc_info + 3); | 2173 | RBIOS32(lcd_ddc_info + 3); |
1961 | ddc_i2c.a_data_mask = | 2174 | ddc_i2c.a_data_mask = |
1962 | RBIOS32(lcd_ddc_info + 7); | 2175 | RBIOS32(lcd_ddc_info + 7); |
1963 | ddc_i2c.put_clk_mask = | 2176 | ddc_i2c.en_clk_mask = |
1964 | RBIOS32(lcd_ddc_info + 3); | 2177 | RBIOS32(lcd_ddc_info + 3); |
1965 | ddc_i2c.put_data_mask = | 2178 | ddc_i2c.en_data_mask = |
1966 | RBIOS32(lcd_ddc_info + 7); | 2179 | RBIOS32(lcd_ddc_info + 7); |
1967 | ddc_i2c.get_clk_mask = | 2180 | ddc_i2c.y_clk_mask = |
1968 | RBIOS32(lcd_ddc_info + 3); | 2181 | RBIOS32(lcd_ddc_info + 3); |
1969 | ddc_i2c.get_data_mask = | 2182 | ddc_i2c.y_data_mask = |
1970 | RBIOS32(lcd_ddc_info + 7); | 2183 | RBIOS32(lcd_ddc_info + 7); |
1971 | break; | 2184 | break; |
1972 | default: | 2185 | default: |
@@ -1977,12 +2190,14 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) | |||
1977 | } else | 2190 | } else |
1978 | ddc_i2c.valid = false; | 2191 | ddc_i2c.valid = false; |
1979 | 2192 | ||
2193 | hpd.hpd = RADEON_HPD_NONE; | ||
1980 | radeon_add_legacy_connector(dev, | 2194 | radeon_add_legacy_connector(dev, |
1981 | 5, | 2195 | 5, |
1982 | ATOM_DEVICE_LCD1_SUPPORT, | 2196 | ATOM_DEVICE_LCD1_SUPPORT, |
1983 | DRM_MODE_CONNECTOR_LVDS, | 2197 | DRM_MODE_CONNECTOR_LVDS, |
1984 | &ddc_i2c, | 2198 | &ddc_i2c, |
1985 | CONNECTOR_OBJECT_ID_LVDS); | 2199 | CONNECTOR_OBJECT_ID_LVDS, |
2200 | &hpd); | ||
1986 | } | 2201 | } |
1987 | } | 2202 | } |
1988 | 2203 | ||
@@ -1993,6 +2208,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) | |||
1993 | if (tv_info) { | 2208 | if (tv_info) { |
1994 | if (RBIOS8(tv_info + 6) == 'T') { | 2209 | if (RBIOS8(tv_info + 6) == 'T') { |
1995 | if (radeon_apply_legacy_tv_quirks(dev)) { | 2210 | if (radeon_apply_legacy_tv_quirks(dev)) { |
2211 | hpd.hpd = RADEON_HPD_NONE; | ||
1996 | radeon_add_legacy_encoder(dev, | 2212 | radeon_add_legacy_encoder(dev, |
1997 | radeon_get_encoder_id | 2213 | radeon_get_encoder_id |
1998 | (dev, | 2214 | (dev, |
@@ -2003,7 +2219,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) | |||
2003 | ATOM_DEVICE_TV1_SUPPORT, | 2219 | ATOM_DEVICE_TV1_SUPPORT, |
2004 | DRM_MODE_CONNECTOR_SVIDEO, | 2220 | DRM_MODE_CONNECTOR_SVIDEO, |
2005 | &ddc_i2c, | 2221 | &ddc_i2c, |
2006 | CONNECTOR_OBJECT_ID_SVIDEO); | 2222 | CONNECTOR_OBJECT_ID_SVIDEO, |
2223 | &hpd); | ||
2007 | } | 2224 | } |
2008 | } | 2225 | } |
2009 | } | 2226 | } |
@@ -2014,6 +2231,193 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) | |||
2014 | return true; | 2231 | return true; |
2015 | } | 2232 | } |
2016 | 2233 | ||
2234 | void radeon_external_tmds_setup(struct drm_encoder *encoder) | ||
2235 | { | ||
2236 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
2237 | struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv; | ||
2238 | |||
2239 | if (!tmds) | ||
2240 | return; | ||
2241 | |||
2242 | switch (tmds->dvo_chip) { | ||
2243 | case DVO_SIL164: | ||
2244 | /* sil 164 */ | ||
2245 | radeon_i2c_do_lock(tmds->i2c_bus, 1); | ||
2246 | radeon_i2c_sw_put_byte(tmds->i2c_bus, | ||
2247 | tmds->slave_addr, | ||
2248 | 0x08, 0x30); | ||
2249 | radeon_i2c_sw_put_byte(tmds->i2c_bus, | ||
2250 | tmds->slave_addr, | ||
2251 | 0x09, 0x00); | ||
2252 | radeon_i2c_sw_put_byte(tmds->i2c_bus, | ||
2253 | tmds->slave_addr, | ||
2254 | 0x0a, 0x90); | ||
2255 | radeon_i2c_sw_put_byte(tmds->i2c_bus, | ||
2256 | tmds->slave_addr, | ||
2257 | 0x0c, 0x89); | ||
2258 | radeon_i2c_sw_put_byte(tmds->i2c_bus, | ||
2259 | tmds->slave_addr, | ||
2260 | 0x08, 0x3b); | ||
2261 | radeon_i2c_do_lock(tmds->i2c_bus, 0); | ||
2262 | break; | ||
2263 | case DVO_SIL1178: | ||
2264 | /* sil 1178 - untested */ | ||
2265 | /* | ||
2266 | * 0x0f, 0x44 | ||
2267 | * 0x0f, 0x4c | ||
2268 | * 0x0e, 0x01 | ||
2269 | * 0x0a, 0x80 | ||
2270 | * 0x09, 0x30 | ||
2271 | * 0x0c, 0xc9 | ||
2272 | * 0x0d, 0x70 | ||
2273 | * 0x08, 0x32 | ||
2274 | * 0x08, 0x33 | ||
2275 | */ | ||
2276 | break; | ||
2277 | default: | ||
2278 | break; | ||
2279 | } | ||
2280 | |||
2281 | } | ||
2282 | |||
2283 | bool radeon_combios_external_tmds_setup(struct drm_encoder *encoder) | ||
2284 | { | ||
2285 | struct drm_device *dev = encoder->dev; | ||
2286 | struct radeon_device *rdev = dev->dev_private; | ||
2287 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
2288 | uint16_t offset; | ||
2289 | uint8_t blocks, slave_addr, rev; | ||
2290 | uint32_t index, id; | ||
2291 | uint32_t reg, val, and_mask, or_mask; | ||
2292 | struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv; | ||
2293 | |||
2294 | if (rdev->bios == NULL) | ||
2295 | return false; | ||
2296 | |||
2297 | if (!tmds) | ||
2298 | return false; | ||
2299 | |||
2300 | if (rdev->flags & RADEON_IS_IGP) { | ||
2301 | offset = combios_get_table_offset(dev, COMBIOS_TMDS_POWER_ON_TABLE); | ||
2302 | rev = RBIOS8(offset); | ||
2303 | if (offset) { | ||
2304 | rev = RBIOS8(offset); | ||
2305 | if (rev > 1) { | ||
2306 | blocks = RBIOS8(offset + 3); | ||
2307 | index = offset + 4; | ||
2308 | while (blocks > 0) { | ||
2309 | id = RBIOS16(index); | ||
2310 | index += 2; | ||
2311 | switch (id >> 13) { | ||
2312 | case 0: | ||
2313 | reg = (id & 0x1fff) * 4; | ||
2314 | val = RBIOS32(index); | ||
2315 | index += 4; | ||
2316 | WREG32(reg, val); | ||
2317 | break; | ||
2318 | case 2: | ||
2319 | reg = (id & 0x1fff) * 4; | ||
2320 | and_mask = RBIOS32(index); | ||
2321 | index += 4; | ||
2322 | or_mask = RBIOS32(index); | ||
2323 | index += 4; | ||
2324 | val = RREG32(reg); | ||
2325 | val = (val & and_mask) | or_mask; | ||
2326 | WREG32(reg, val); | ||
2327 | break; | ||
2328 | case 3: | ||
2329 | val = RBIOS16(index); | ||
2330 | index += 2; | ||
2331 | udelay(val); | ||
2332 | break; | ||
2333 | case 4: | ||
2334 | val = RBIOS16(index); | ||
2335 | index += 2; | ||
2336 | udelay(val * 1000); | ||
2337 | break; | ||
2338 | case 6: | ||
2339 | slave_addr = id & 0xff; | ||
2340 | slave_addr >>= 1; /* 7 bit addressing */ | ||
2341 | index++; | ||
2342 | reg = RBIOS8(index); | ||
2343 | index++; | ||
2344 | val = RBIOS8(index); | ||
2345 | index++; | ||
2346 | radeon_i2c_do_lock(tmds->i2c_bus, 1); | ||
2347 | radeon_i2c_sw_put_byte(tmds->i2c_bus, | ||
2348 | slave_addr, | ||
2349 | reg, val); | ||
2350 | radeon_i2c_do_lock(tmds->i2c_bus, 0); | ||
2351 | break; | ||
2352 | default: | ||
2353 | DRM_ERROR("Unknown id %d\n", id >> 13); | ||
2354 | break; | ||
2355 | } | ||
2356 | blocks--; | ||
2357 | } | ||
2358 | return true; | ||
2359 | } | ||
2360 | } | ||
2361 | } else { | ||
2362 | offset = combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE); | ||
2363 | if (offset) { | ||
2364 | index = offset + 10; | ||
2365 | id = RBIOS16(index); | ||
2366 | while (id != 0xffff) { | ||
2367 | index += 2; | ||
2368 | switch (id >> 13) { | ||
2369 | case 0: | ||
2370 | reg = (id & 0x1fff) * 4; | ||
2371 | val = RBIOS32(index); | ||
2372 | WREG32(reg, val); | ||
2373 | break; | ||
2374 | case 2: | ||
2375 | reg = (id & 0x1fff) * 4; | ||
2376 | and_mask = RBIOS32(index); | ||
2377 | index += 4; | ||
2378 | or_mask = RBIOS32(index); | ||
2379 | index += 4; | ||
2380 | val = RREG32(reg); | ||
2381 | val = (val & and_mask) | or_mask; | ||
2382 | WREG32(reg, val); | ||
2383 | break; | ||
2384 | case 4: | ||
2385 | val = RBIOS16(index); | ||
2386 | index += 2; | ||
2387 | udelay(val); | ||
2388 | break; | ||
2389 | case 5: | ||
2390 | reg = id & 0x1fff; | ||
2391 | and_mask = RBIOS32(index); | ||
2392 | index += 4; | ||
2393 | or_mask = RBIOS32(index); | ||
2394 | index += 4; | ||
2395 | val = RREG32_PLL(reg); | ||
2396 | val = (val & and_mask) | or_mask; | ||
2397 | WREG32_PLL(reg, val); | ||
2398 | break; | ||
2399 | case 6: | ||
2400 | reg = id & 0x1fff; | ||
2401 | val = RBIOS8(index); | ||
2402 | index += 1; | ||
2403 | radeon_i2c_do_lock(tmds->i2c_bus, 1); | ||
2404 | radeon_i2c_sw_put_byte(tmds->i2c_bus, | ||
2405 | tmds->slave_addr, | ||
2406 | reg, val); | ||
2407 | radeon_i2c_do_lock(tmds->i2c_bus, 0); | ||
2408 | break; | ||
2409 | default: | ||
2410 | DRM_ERROR("Unknown id %d\n", id >> 13); | ||
2411 | break; | ||
2412 | } | ||
2413 | id = RBIOS16(index); | ||
2414 | } | ||
2415 | return true; | ||
2416 | } | ||
2417 | } | ||
2418 | return false; | ||
2419 | } | ||
2420 | |||
2017 | static void combios_parse_mmio_table(struct drm_device *dev, uint16_t offset) | 2421 | static void combios_parse_mmio_table(struct drm_device *dev, uint16_t offset) |
2018 | { | 2422 | { |
2019 | struct radeon_device *rdev = dev->dev_private; | 2423 | struct radeon_device *rdev = dev->dev_private; |
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index fce4c4087fda..cfa2ebb259fe 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c | |||
@@ -40,6 +40,26 @@ radeon_atombios_connected_scratch_regs(struct drm_connector *connector, | |||
40 | struct drm_encoder *encoder, | 40 | struct drm_encoder *encoder, |
41 | bool connected); | 41 | bool connected); |
42 | 42 | ||
43 | void radeon_connector_hotplug(struct drm_connector *connector) | ||
44 | { | ||
45 | struct drm_device *dev = connector->dev; | ||
46 | struct radeon_device *rdev = dev->dev_private; | ||
47 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | ||
48 | |||
49 | if (radeon_connector->hpd.hpd != RADEON_HPD_NONE) | ||
50 | radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd); | ||
51 | |||
52 | if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) { | ||
53 | if (radeon_dp_getsinktype(radeon_connector) == CONNECTOR_OBJECT_ID_DISPLAYPORT) { | ||
54 | if (radeon_dp_needs_link_train(radeon_connector)) { | ||
55 | if (connector->encoder) | ||
56 | dp_link_train(connector->encoder, connector); | ||
57 | } | ||
58 | } | ||
59 | } | ||
60 | |||
61 | } | ||
62 | |||
43 | static void radeon_property_change_mode(struct drm_encoder *encoder) | 63 | static void radeon_property_change_mode(struct drm_encoder *encoder) |
44 | { | 64 | { |
45 | struct drm_crtc *crtc = encoder->crtc; | 65 | struct drm_crtc *crtc = encoder->crtc; |
@@ -445,10 +465,10 @@ static enum drm_connector_status radeon_lvds_detect(struct drm_connector *connec | |||
445 | ret = connector_status_connected; | 465 | ret = connector_status_connected; |
446 | else { | 466 | else { |
447 | if (radeon_connector->ddc_bus) { | 467 | if (radeon_connector->ddc_bus) { |
448 | radeon_i2c_do_lock(radeon_connector, 1); | 468 | radeon_i2c_do_lock(radeon_connector->ddc_bus, 1); |
449 | radeon_connector->edid = drm_get_edid(&radeon_connector->base, | 469 | radeon_connector->edid = drm_get_edid(&radeon_connector->base, |
450 | &radeon_connector->ddc_bus->adapter); | 470 | &radeon_connector->ddc_bus->adapter); |
451 | radeon_i2c_do_lock(radeon_connector, 0); | 471 | radeon_i2c_do_lock(radeon_connector->ddc_bus, 0); |
452 | if (radeon_connector->edid) | 472 | if (radeon_connector->edid) |
453 | ret = connector_status_connected; | 473 | ret = connector_status_connected; |
454 | } | 474 | } |
@@ -553,21 +573,22 @@ static enum drm_connector_status radeon_vga_detect(struct drm_connector *connect | |||
553 | if (!encoder) | 573 | if (!encoder) |
554 | ret = connector_status_disconnected; | 574 | ret = connector_status_disconnected; |
555 | 575 | ||
556 | radeon_i2c_do_lock(radeon_connector, 1); | 576 | radeon_i2c_do_lock(radeon_connector->ddc_bus, 1); |
557 | dret = radeon_ddc_probe(radeon_connector); | 577 | dret = radeon_ddc_probe(radeon_connector); |
558 | radeon_i2c_do_lock(radeon_connector, 0); | 578 | radeon_i2c_do_lock(radeon_connector->ddc_bus, 0); |
559 | if (dret) { | 579 | if (dret) { |
560 | if (radeon_connector->edid) { | 580 | if (radeon_connector->edid) { |
561 | kfree(radeon_connector->edid); | 581 | kfree(radeon_connector->edid); |
562 | radeon_connector->edid = NULL; | 582 | radeon_connector->edid = NULL; |
563 | } | 583 | } |
564 | radeon_i2c_do_lock(radeon_connector, 1); | 584 | radeon_i2c_do_lock(radeon_connector->ddc_bus, 1); |
565 | radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); | 585 | radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); |
566 | radeon_i2c_do_lock(radeon_connector, 0); | 586 | radeon_i2c_do_lock(radeon_connector->ddc_bus, 0); |
567 | 587 | ||
568 | if (!radeon_connector->edid) { | 588 | if (!radeon_connector->edid) { |
569 | DRM_ERROR("DDC responded but not EDID found for %s\n", | 589 | DRM_ERROR("%s: probed a monitor but no|invalid EDID\n", |
570 | drm_get_connector_name(connector)); | 590 | drm_get_connector_name(connector)); |
591 | ret = connector_status_connected; | ||
571 | } else { | 592 | } else { |
572 | radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL); | 593 | radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL); |
573 | 594 | ||
@@ -707,21 +728,21 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect | |||
707 | enum drm_connector_status ret = connector_status_disconnected; | 728 | enum drm_connector_status ret = connector_status_disconnected; |
708 | bool dret; | 729 | bool dret; |
709 | 730 | ||
710 | radeon_i2c_do_lock(radeon_connector, 1); | 731 | radeon_i2c_do_lock(radeon_connector->ddc_bus, 1); |
711 | dret = radeon_ddc_probe(radeon_connector); | 732 | dret = radeon_ddc_probe(radeon_connector); |
712 | radeon_i2c_do_lock(radeon_connector, 0); | 733 | radeon_i2c_do_lock(radeon_connector->ddc_bus, 0); |
713 | if (dret) { | 734 | if (dret) { |
714 | if (radeon_connector->edid) { | 735 | if (radeon_connector->edid) { |
715 | kfree(radeon_connector->edid); | 736 | kfree(radeon_connector->edid); |
716 | radeon_connector->edid = NULL; | 737 | radeon_connector->edid = NULL; |
717 | } | 738 | } |
718 | radeon_i2c_do_lock(radeon_connector, 1); | 739 | radeon_i2c_do_lock(radeon_connector->ddc_bus, 1); |
719 | radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); | 740 | radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); |
720 | radeon_i2c_do_lock(radeon_connector, 0); | 741 | radeon_i2c_do_lock(radeon_connector->ddc_bus, 0); |
721 | 742 | ||
722 | if (!radeon_connector->edid) { | 743 | if (!radeon_connector->edid) { |
723 | DRM_ERROR("DDC responded but not EDID found for %s\n", | 744 | DRM_ERROR("%s: probed a monitor but no|invalid EDID\n", |
724 | drm_get_connector_name(connector)); | 745 | drm_get_connector_name(connector)); |
725 | } else { | 746 | } else { |
726 | radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL); | 747 | radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL); |
727 | 748 | ||
@@ -734,6 +755,39 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect | |||
734 | ret = connector_status_disconnected; | 755 | ret = connector_status_disconnected; |
735 | } else | 756 | } else |
736 | ret = connector_status_connected; | 757 | ret = connector_status_connected; |
758 | |||
759 | /* multiple connectors on the same encoder with the same ddc line | ||
760 | * This tends to be HDMI and DVI on the same encoder with the | ||
761 | * same ddc line. If the edid says HDMI, consider the HDMI port | ||
762 | * connected and the DVI port disconnected. If the edid doesn't | ||
763 | * say HDMI, vice versa. | ||
764 | */ | ||
765 | if (radeon_connector->shared_ddc && connector_status_connected) { | ||
766 | struct drm_device *dev = connector->dev; | ||
767 | struct drm_connector *list_connector; | ||
768 | struct radeon_connector *list_radeon_connector; | ||
769 | list_for_each_entry(list_connector, &dev->mode_config.connector_list, head) { | ||
770 | if (connector == list_connector) | ||
771 | continue; | ||
772 | list_radeon_connector = to_radeon_connector(list_connector); | ||
773 | if (radeon_connector->devices == list_radeon_connector->devices) { | ||
774 | if (drm_detect_hdmi_monitor(radeon_connector->edid)) { | ||
775 | if (connector->connector_type == DRM_MODE_CONNECTOR_DVID) { | ||
776 | kfree(radeon_connector->edid); | ||
777 | radeon_connector->edid = NULL; | ||
778 | ret = connector_status_disconnected; | ||
779 | } | ||
780 | } else { | ||
781 | if ((connector->connector_type == DRM_MODE_CONNECTOR_HDMIA) || | ||
782 | (connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)) { | ||
783 | kfree(radeon_connector->edid); | ||
784 | radeon_connector->edid = NULL; | ||
785 | ret = connector_status_disconnected; | ||
786 | } | ||
787 | } | ||
788 | } | ||
789 | } | ||
790 | } | ||
737 | } | 791 | } |
738 | } | 792 | } |
739 | 793 | ||
@@ -862,6 +916,91 @@ struct drm_connector_funcs radeon_dvi_connector_funcs = { | |||
862 | .force = radeon_dvi_force, | 916 | .force = radeon_dvi_force, |
863 | }; | 917 | }; |
864 | 918 | ||
919 | static void radeon_dp_connector_destroy(struct drm_connector *connector) | ||
920 | { | ||
921 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | ||
922 | struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv; | ||
923 | |||
924 | if (radeon_connector->ddc_bus) | ||
925 | radeon_i2c_destroy(radeon_connector->ddc_bus); | ||
926 | if (radeon_connector->edid) | ||
927 | kfree(radeon_connector->edid); | ||
928 | if (radeon_dig_connector->dp_i2c_bus) | ||
929 | radeon_i2c_destroy(radeon_dig_connector->dp_i2c_bus); | ||
930 | kfree(radeon_connector->con_priv); | ||
931 | drm_sysfs_connector_remove(connector); | ||
932 | drm_connector_cleanup(connector); | ||
933 | kfree(connector); | ||
934 | } | ||
935 | |||
936 | static int radeon_dp_get_modes(struct drm_connector *connector) | ||
937 | { | ||
938 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | ||
939 | int ret; | ||
940 | |||
941 | ret = radeon_ddc_get_modes(radeon_connector); | ||
942 | return ret; | ||
943 | } | ||
944 | |||
945 | static enum drm_connector_status radeon_dp_detect(struct drm_connector *connector) | ||
946 | { | ||
947 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | ||
948 | enum drm_connector_status ret = connector_status_disconnected; | ||
949 | struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv; | ||
950 | u8 sink_type; | ||
951 | |||
952 | if (radeon_connector->edid) { | ||
953 | kfree(radeon_connector->edid); | ||
954 | radeon_connector->edid = NULL; | ||
955 | } | ||
956 | |||
957 | sink_type = radeon_dp_getsinktype(radeon_connector); | ||
958 | if (sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) { | ||
959 | if (radeon_dp_getdpcd(radeon_connector)) { | ||
960 | radeon_dig_connector->dp_sink_type = sink_type; | ||
961 | ret = connector_status_connected; | ||
962 | } | ||
963 | } else { | ||
964 | radeon_i2c_do_lock(radeon_connector->ddc_bus, 1); | ||
965 | if (radeon_ddc_probe(radeon_connector)) { | ||
966 | radeon_dig_connector->dp_sink_type = sink_type; | ||
967 | ret = connector_status_connected; | ||
968 | } | ||
969 | radeon_i2c_do_lock(radeon_connector->ddc_bus, 0); | ||
970 | } | ||
971 | |||
972 | return ret; | ||
973 | } | ||
974 | |||
975 | static int radeon_dp_mode_valid(struct drm_connector *connector, | ||
976 | struct drm_display_mode *mode) | ||
977 | { | ||
978 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | ||
979 | struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv; | ||
980 | |||
981 | /* XXX check mode bandwidth */ | ||
982 | |||
983 | if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) | ||
984 | return radeon_dp_mode_valid_helper(radeon_connector, mode); | ||
985 | else | ||
986 | return MODE_OK; | ||
987 | } | ||
988 | |||
989 | struct drm_connector_helper_funcs radeon_dp_connector_helper_funcs = { | ||
990 | .get_modes = radeon_dp_get_modes, | ||
991 | .mode_valid = radeon_dp_mode_valid, | ||
992 | .best_encoder = radeon_dvi_encoder, | ||
993 | }; | ||
994 | |||
995 | struct drm_connector_funcs radeon_dp_connector_funcs = { | ||
996 | .dpms = drm_helper_connector_dpms, | ||
997 | .detect = radeon_dp_detect, | ||
998 | .fill_modes = drm_helper_probe_single_connector_modes, | ||
999 | .set_property = radeon_connector_set_property, | ||
1000 | .destroy = radeon_dp_connector_destroy, | ||
1001 | .force = radeon_dvi_force, | ||
1002 | }; | ||
1003 | |||
865 | void | 1004 | void |
866 | radeon_add_atom_connector(struct drm_device *dev, | 1005 | radeon_add_atom_connector(struct drm_device *dev, |
867 | uint32_t connector_id, | 1006 | uint32_t connector_id, |
@@ -870,7 +1009,8 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
870 | struct radeon_i2c_bus_rec *i2c_bus, | 1009 | struct radeon_i2c_bus_rec *i2c_bus, |
871 | bool linkb, | 1010 | bool linkb, |
872 | uint32_t igp_lane_info, | 1011 | uint32_t igp_lane_info, |
873 | uint16_t connector_object_id) | 1012 | uint16_t connector_object_id, |
1013 | struct radeon_hpd *hpd) | ||
874 | { | 1014 | { |
875 | struct radeon_device *rdev = dev->dev_private; | 1015 | struct radeon_device *rdev = dev->dev_private; |
876 | struct drm_connector *connector; | 1016 | struct drm_connector *connector; |
@@ -910,6 +1050,7 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
910 | radeon_connector->devices = supported_device; | 1050 | radeon_connector->devices = supported_device; |
911 | radeon_connector->shared_ddc = shared_ddc; | 1051 | radeon_connector->shared_ddc = shared_ddc; |
912 | radeon_connector->connector_object_id = connector_object_id; | 1052 | radeon_connector->connector_object_id = connector_object_id; |
1053 | radeon_connector->hpd = *hpd; | ||
913 | switch (connector_type) { | 1054 | switch (connector_type) { |
914 | case DRM_MODE_CONNECTOR_VGA: | 1055 | case DRM_MODE_CONNECTOR_VGA: |
915 | drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); | 1056 | drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); |
@@ -996,10 +1137,12 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
996 | radeon_dig_connector->linkb = linkb; | 1137 | radeon_dig_connector->linkb = linkb; |
997 | radeon_dig_connector->igp_lane_info = igp_lane_info; | 1138 | radeon_dig_connector->igp_lane_info = igp_lane_info; |
998 | radeon_connector->con_priv = radeon_dig_connector; | 1139 | radeon_connector->con_priv = radeon_dig_connector; |
999 | drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); | 1140 | drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type); |
1000 | ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs); | 1141 | ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs); |
1001 | if (ret) | 1142 | if (ret) |
1002 | goto failed; | 1143 | goto failed; |
1144 | /* add DP i2c bus */ | ||
1145 | radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch"); | ||
1003 | if (i2c_bus->valid) { | 1146 | if (i2c_bus->valid) { |
1004 | radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DP"); | 1147 | radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DP"); |
1005 | if (!radeon_connector->ddc_bus) | 1148 | if (!radeon_connector->ddc_bus) |
@@ -1019,6 +1162,9 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
1019 | drm_connector_attach_property(&radeon_connector->base, | 1162 | drm_connector_attach_property(&radeon_connector->base, |
1020 | rdev->mode_info.load_detect_property, | 1163 | rdev->mode_info.load_detect_property, |
1021 | 1); | 1164 | 1); |
1165 | drm_connector_attach_property(&radeon_connector->base, | ||
1166 | rdev->mode_info.tv_std_property, | ||
1167 | 1); | ||
1022 | } | 1168 | } |
1023 | break; | 1169 | break; |
1024 | case DRM_MODE_CONNECTOR_LVDS: | 1170 | case DRM_MODE_CONNECTOR_LVDS: |
@@ -1062,7 +1208,8 @@ radeon_add_legacy_connector(struct drm_device *dev, | |||
1062 | uint32_t supported_device, | 1208 | uint32_t supported_device, |
1063 | int connector_type, | 1209 | int connector_type, |
1064 | struct radeon_i2c_bus_rec *i2c_bus, | 1210 | struct radeon_i2c_bus_rec *i2c_bus, |
1065 | uint16_t connector_object_id) | 1211 | uint16_t connector_object_id, |
1212 | struct radeon_hpd *hpd) | ||
1066 | { | 1213 | { |
1067 | struct radeon_device *rdev = dev->dev_private; | 1214 | struct radeon_device *rdev = dev->dev_private; |
1068 | struct drm_connector *connector; | 1215 | struct drm_connector *connector; |
@@ -1092,6 +1239,7 @@ radeon_add_legacy_connector(struct drm_device *dev, | |||
1092 | radeon_connector->connector_id = connector_id; | 1239 | radeon_connector->connector_id = connector_id; |
1093 | radeon_connector->devices = supported_device; | 1240 | radeon_connector->devices = supported_device; |
1094 | radeon_connector->connector_object_id = connector_object_id; | 1241 | radeon_connector->connector_object_id = connector_object_id; |
1242 | radeon_connector->hpd = *hpd; | ||
1095 | switch (connector_type) { | 1243 | switch (connector_type) { |
1096 | case DRM_MODE_CONNECTOR_VGA: | 1244 | case DRM_MODE_CONNECTOR_VGA: |
1097 | drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); | 1245 | drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); |
@@ -1149,9 +1297,19 @@ radeon_add_legacy_connector(struct drm_device *dev, | |||
1149 | if (ret) | 1297 | if (ret) |
1150 | goto failed; | 1298 | goto failed; |
1151 | radeon_connector->dac_load_detect = true; | 1299 | radeon_connector->dac_load_detect = true; |
1300 | /* RS400,RC410,RS480 chipset seems to report a lot | ||
1301 | * of false positive on load detect, we haven't yet | ||
1302 | * found a way to make load detect reliable on those | ||
1303 | * chipset, thus just disable it for TV. | ||
1304 | */ | ||
1305 | if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) | ||
1306 | radeon_connector->dac_load_detect = false; | ||
1152 | drm_connector_attach_property(&radeon_connector->base, | 1307 | drm_connector_attach_property(&radeon_connector->base, |
1153 | rdev->mode_info.load_detect_property, | 1308 | rdev->mode_info.load_detect_property, |
1154 | 1); | 1309 | 1); |
1310 | drm_connector_attach_property(&radeon_connector->base, | ||
1311 | rdev->mode_info.tv_std_property, | ||
1312 | 1); | ||
1155 | } | 1313 | } |
1156 | break; | 1314 | break; |
1157 | case DRM_MODE_CONNECTOR_LVDS: | 1315 | case DRM_MODE_CONNECTOR_LVDS: |
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index 5ab2cf96a264..65590a0f1d93 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c | |||
@@ -76,17 +76,17 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p) | |||
76 | } | 76 | } |
77 | p->relocs_ptr[i] = &p->relocs[i]; | 77 | p->relocs_ptr[i] = &p->relocs[i]; |
78 | p->relocs[i].robj = p->relocs[i].gobj->driver_private; | 78 | p->relocs[i].robj = p->relocs[i].gobj->driver_private; |
79 | p->relocs[i].lobj.robj = p->relocs[i].robj; | 79 | p->relocs[i].lobj.bo = p->relocs[i].robj; |
80 | p->relocs[i].lobj.rdomain = r->read_domains; | 80 | p->relocs[i].lobj.rdomain = r->read_domains; |
81 | p->relocs[i].lobj.wdomain = r->write_domain; | 81 | p->relocs[i].lobj.wdomain = r->write_domain; |
82 | p->relocs[i].handle = r->handle; | 82 | p->relocs[i].handle = r->handle; |
83 | p->relocs[i].flags = r->flags; | 83 | p->relocs[i].flags = r->flags; |
84 | INIT_LIST_HEAD(&p->relocs[i].lobj.list); | 84 | INIT_LIST_HEAD(&p->relocs[i].lobj.list); |
85 | radeon_object_list_add_object(&p->relocs[i].lobj, | 85 | radeon_bo_list_add_object(&p->relocs[i].lobj, |
86 | &p->validated); | 86 | &p->validated); |
87 | } | 87 | } |
88 | } | 88 | } |
89 | return radeon_object_list_validate(&p->validated, p->ib->fence); | 89 | return radeon_bo_list_validate(&p->validated, p->ib->fence); |
90 | } | 90 | } |
91 | 91 | ||
92 | int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) | 92 | int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) |
@@ -190,9 +190,10 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error) | |||
190 | unsigned i; | 190 | unsigned i; |
191 | 191 | ||
192 | if (error) { | 192 | if (error) { |
193 | radeon_object_list_unvalidate(&parser->validated); | 193 | radeon_bo_list_unvalidate(&parser->validated, |
194 | parser->ib->fence); | ||
194 | } else { | 195 | } else { |
195 | radeon_object_list_clean(&parser->validated); | 196 | radeon_bo_list_unreserve(&parser->validated); |
196 | } | 197 | } |
197 | for (i = 0; i < parser->nrelocs; i++) { | 198 | for (i = 0; i < parser->nrelocs; i++) { |
198 | if (parser->relocs[i].gobj) { | 199 | if (parser->relocs[i].gobj) { |
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index e3f9edfa40fe..7e55647f118e 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
@@ -208,6 +208,24 @@ bool radeon_card_posted(struct radeon_device *rdev) | |||
208 | 208 | ||
209 | } | 209 | } |
210 | 210 | ||
211 | bool radeon_boot_test_post_card(struct radeon_device *rdev) | ||
212 | { | ||
213 | if (radeon_card_posted(rdev)) | ||
214 | return true; | ||
215 | |||
216 | if (rdev->bios) { | ||
217 | DRM_INFO("GPU not posted. posting now...\n"); | ||
218 | if (rdev->is_atom_bios) | ||
219 | atom_asic_init(rdev->mode_info.atom_context); | ||
220 | else | ||
221 | radeon_combios_asic_init(rdev->ddev); | ||
222 | return true; | ||
223 | } else { | ||
224 | dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n"); | ||
225 | return false; | ||
226 | } | ||
227 | } | ||
228 | |||
211 | int radeon_dummy_page_init(struct radeon_device *rdev) | 229 | int radeon_dummy_page_init(struct radeon_device *rdev) |
212 | { | 230 | { |
213 | rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO); | 231 | rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO); |
@@ -463,11 +481,13 @@ int radeon_atombios_init(struct radeon_device *rdev) | |||
463 | 481 | ||
464 | rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios); | 482 | rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios); |
465 | radeon_atom_initialize_bios_scratch_regs(rdev->ddev); | 483 | radeon_atom_initialize_bios_scratch_regs(rdev->ddev); |
484 | atom_allocate_fb_scratch(rdev->mode_info.atom_context); | ||
466 | return 0; | 485 | return 0; |
467 | } | 486 | } |
468 | 487 | ||
469 | void radeon_atombios_fini(struct radeon_device *rdev) | 488 | void radeon_atombios_fini(struct radeon_device *rdev) |
470 | { | 489 | { |
490 | kfree(rdev->mode_info.atom_context->scratch); | ||
471 | kfree(rdev->mode_info.atom_context); | 491 | kfree(rdev->mode_info.atom_context); |
472 | kfree(rdev->mode_info.atom_card_info); | 492 | kfree(rdev->mode_info.atom_card_info); |
473 | } | 493 | } |
@@ -544,16 +564,24 @@ int radeon_device_init(struct radeon_device *rdev, | |||
544 | mutex_init(&rdev->cs_mutex); | 564 | mutex_init(&rdev->cs_mutex); |
545 | mutex_init(&rdev->ib_pool.mutex); | 565 | mutex_init(&rdev->ib_pool.mutex); |
546 | mutex_init(&rdev->cp.mutex); | 566 | mutex_init(&rdev->cp.mutex); |
567 | if (rdev->family >= CHIP_R600) | ||
568 | spin_lock_init(&rdev->ih.lock); | ||
569 | mutex_init(&rdev->gem.mutex); | ||
547 | rwlock_init(&rdev->fence_drv.lock); | 570 | rwlock_init(&rdev->fence_drv.lock); |
548 | INIT_LIST_HEAD(&rdev->gem.objects); | 571 | INIT_LIST_HEAD(&rdev->gem.objects); |
549 | 572 | ||
573 | /* setup workqueue */ | ||
574 | rdev->wq = create_workqueue("radeon"); | ||
575 | if (rdev->wq == NULL) | ||
576 | return -ENOMEM; | ||
577 | |||
550 | /* Set asic functions */ | 578 | /* Set asic functions */ |
551 | r = radeon_asic_init(rdev); | 579 | r = radeon_asic_init(rdev); |
552 | if (r) { | 580 | if (r) { |
553 | return r; | 581 | return r; |
554 | } | 582 | } |
555 | 583 | ||
556 | if (radeon_agpmode == -1) { | 584 | if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) { |
557 | radeon_agp_disable(rdev); | 585 | radeon_agp_disable(rdev); |
558 | } | 586 | } |
559 | 587 | ||
@@ -620,6 +648,7 @@ void radeon_device_fini(struct radeon_device *rdev) | |||
620 | DRM_INFO("radeon: finishing device.\n"); | 648 | DRM_INFO("radeon: finishing device.\n"); |
621 | rdev->shutdown = true; | 649 | rdev->shutdown = true; |
622 | radeon_fini(rdev); | 650 | radeon_fini(rdev); |
651 | destroy_workqueue(rdev->wq); | ||
623 | vga_client_register(rdev->pdev, NULL, NULL, NULL); | 652 | vga_client_register(rdev->pdev, NULL, NULL, NULL); |
624 | iounmap(rdev->rmmio); | 653 | iounmap(rdev->rmmio); |
625 | rdev->rmmio = NULL; | 654 | rdev->rmmio = NULL; |
@@ -633,6 +662,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) | |||
633 | { | 662 | { |
634 | struct radeon_device *rdev = dev->dev_private; | 663 | struct radeon_device *rdev = dev->dev_private; |
635 | struct drm_crtc *crtc; | 664 | struct drm_crtc *crtc; |
665 | int r; | ||
636 | 666 | ||
637 | if (dev == NULL || rdev == NULL) { | 667 | if (dev == NULL || rdev == NULL) { |
638 | return -ENODEV; | 668 | return -ENODEV; |
@@ -643,26 +673,31 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) | |||
643 | /* unpin the front buffers */ | 673 | /* unpin the front buffers */ |
644 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 674 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
645 | struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb); | 675 | struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb); |
646 | struct radeon_object *robj; | 676 | struct radeon_bo *robj; |
647 | 677 | ||
648 | if (rfb == NULL || rfb->obj == NULL) { | 678 | if (rfb == NULL || rfb->obj == NULL) { |
649 | continue; | 679 | continue; |
650 | } | 680 | } |
651 | robj = rfb->obj->driver_private; | 681 | robj = rfb->obj->driver_private; |
652 | if (robj != rdev->fbdev_robj) { | 682 | if (robj != rdev->fbdev_rbo) { |
653 | radeon_object_unpin(robj); | 683 | r = radeon_bo_reserve(robj, false); |
684 | if (unlikely(r == 0)) { | ||
685 | radeon_bo_unpin(robj); | ||
686 | radeon_bo_unreserve(robj); | ||
687 | } | ||
654 | } | 688 | } |
655 | } | 689 | } |
656 | /* evict vram memory */ | 690 | /* evict vram memory */ |
657 | radeon_object_evict_vram(rdev); | 691 | radeon_bo_evict_vram(rdev); |
658 | /* wait for gpu to finish processing current batch */ | 692 | /* wait for gpu to finish processing current batch */ |
659 | radeon_fence_wait_last(rdev); | 693 | radeon_fence_wait_last(rdev); |
660 | 694 | ||
661 | radeon_save_bios_scratch_regs(rdev); | 695 | radeon_save_bios_scratch_regs(rdev); |
662 | 696 | ||
663 | radeon_suspend(rdev); | 697 | radeon_suspend(rdev); |
698 | radeon_hpd_fini(rdev); | ||
664 | /* evict remaining vram memory */ | 699 | /* evict remaining vram memory */ |
665 | radeon_object_evict_vram(rdev); | 700 | radeon_bo_evict_vram(rdev); |
666 | 701 | ||
667 | pci_save_state(dev->pdev); | 702 | pci_save_state(dev->pdev); |
668 | if (state.event == PM_EVENT_SUSPEND) { | 703 | if (state.event == PM_EVENT_SUSPEND) { |
@@ -688,11 +723,15 @@ int radeon_resume_kms(struct drm_device *dev) | |||
688 | return -1; | 723 | return -1; |
689 | } | 724 | } |
690 | pci_set_master(dev->pdev); | 725 | pci_set_master(dev->pdev); |
726 | /* resume AGP if in use */ | ||
727 | radeon_agp_resume(rdev); | ||
691 | radeon_resume(rdev); | 728 | radeon_resume(rdev); |
692 | radeon_restore_bios_scratch_regs(rdev); | 729 | radeon_restore_bios_scratch_regs(rdev); |
693 | fb_set_suspend(rdev->fbdev_info, 0); | 730 | fb_set_suspend(rdev->fbdev_info, 0); |
694 | release_console_sem(); | 731 | release_console_sem(); |
695 | 732 | ||
733 | /* reset hpd state */ | ||
734 | radeon_hpd_init(rdev); | ||
696 | /* blat the mode back in */ | 735 | /* blat the mode back in */ |
697 | drm_helper_resume_force_mode(dev); | 736 | drm_helper_resume_force_mode(dev); |
698 | return 0; | 737 | return 0; |
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index c85df4afcb7a..c115f2e442eb 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c | |||
@@ -250,6 +250,16 @@ static const char *connector_names[13] = { | |||
250 | "HDMI-B", | 250 | "HDMI-B", |
251 | }; | 251 | }; |
252 | 252 | ||
253 | static const char *hpd_names[7] = { | ||
254 | "NONE", | ||
255 | "HPD1", | ||
256 | "HPD2", | ||
257 | "HPD3", | ||
258 | "HPD4", | ||
259 | "HPD5", | ||
260 | "HPD6", | ||
261 | }; | ||
262 | |||
253 | static void radeon_print_display_setup(struct drm_device *dev) | 263 | static void radeon_print_display_setup(struct drm_device *dev) |
254 | { | 264 | { |
255 | struct drm_connector *connector; | 265 | struct drm_connector *connector; |
@@ -264,16 +274,18 @@ static void radeon_print_display_setup(struct drm_device *dev) | |||
264 | radeon_connector = to_radeon_connector(connector); | 274 | radeon_connector = to_radeon_connector(connector); |
265 | DRM_INFO("Connector %d:\n", i); | 275 | DRM_INFO("Connector %d:\n", i); |
266 | DRM_INFO(" %s\n", connector_names[connector->connector_type]); | 276 | DRM_INFO(" %s\n", connector_names[connector->connector_type]); |
277 | if (radeon_connector->hpd.hpd != RADEON_HPD_NONE) | ||
278 | DRM_INFO(" %s\n", hpd_names[radeon_connector->hpd.hpd]); | ||
267 | if (radeon_connector->ddc_bus) | 279 | if (radeon_connector->ddc_bus) |
268 | DRM_INFO(" DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", | 280 | DRM_INFO(" DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", |
269 | radeon_connector->ddc_bus->rec.mask_clk_reg, | 281 | radeon_connector->ddc_bus->rec.mask_clk_reg, |
270 | radeon_connector->ddc_bus->rec.mask_data_reg, | 282 | radeon_connector->ddc_bus->rec.mask_data_reg, |
271 | radeon_connector->ddc_bus->rec.a_clk_reg, | 283 | radeon_connector->ddc_bus->rec.a_clk_reg, |
272 | radeon_connector->ddc_bus->rec.a_data_reg, | 284 | radeon_connector->ddc_bus->rec.a_data_reg, |
273 | radeon_connector->ddc_bus->rec.put_clk_reg, | 285 | radeon_connector->ddc_bus->rec.en_clk_reg, |
274 | radeon_connector->ddc_bus->rec.put_data_reg, | 286 | radeon_connector->ddc_bus->rec.en_data_reg, |
275 | radeon_connector->ddc_bus->rec.get_clk_reg, | 287 | radeon_connector->ddc_bus->rec.y_clk_reg, |
276 | radeon_connector->ddc_bus->rec.get_data_reg); | 288 | radeon_connector->ddc_bus->rec.y_data_reg); |
277 | DRM_INFO(" Encoders:\n"); | 289 | DRM_INFO(" Encoders:\n"); |
278 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | 290 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { |
279 | radeon_encoder = to_radeon_encoder(encoder); | 291 | radeon_encoder = to_radeon_encoder(encoder); |
@@ -324,6 +336,7 @@ static bool radeon_setup_enc_conn(struct drm_device *dev) | |||
324 | ret = radeon_get_legacy_connector_info_from_table(dev); | 336 | ret = radeon_get_legacy_connector_info_from_table(dev); |
325 | } | 337 | } |
326 | if (ret) { | 338 | if (ret) { |
339 | radeon_setup_encoder_clones(dev); | ||
327 | radeon_print_display_setup(dev); | 340 | radeon_print_display_setup(dev); |
328 | list_for_each_entry(drm_connector, &dev->mode_config.connector_list, head) | 341 | list_for_each_entry(drm_connector, &dev->mode_config.connector_list, head) |
329 | radeon_ddc_dump(drm_connector); | 342 | radeon_ddc_dump(drm_connector); |
@@ -336,12 +349,17 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector) | |||
336 | { | 349 | { |
337 | int ret = 0; | 350 | int ret = 0; |
338 | 351 | ||
352 | if (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) { | ||
353 | struct radeon_connector_atom_dig *dig = radeon_connector->con_priv; | ||
354 | if (dig->dp_i2c_bus) | ||
355 | radeon_connector->edid = drm_get_edid(&radeon_connector->base, &dig->dp_i2c_bus->adapter); | ||
356 | } | ||
339 | if (!radeon_connector->ddc_bus) | 357 | if (!radeon_connector->ddc_bus) |
340 | return -1; | 358 | return -1; |
341 | if (!radeon_connector->edid) { | 359 | if (!radeon_connector->edid) { |
342 | radeon_i2c_do_lock(radeon_connector, 1); | 360 | radeon_i2c_do_lock(radeon_connector->ddc_bus, 1); |
343 | radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); | 361 | radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); |
344 | radeon_i2c_do_lock(radeon_connector, 0); | 362 | radeon_i2c_do_lock(radeon_connector->ddc_bus, 0); |
345 | } | 363 | } |
346 | 364 | ||
347 | if (radeon_connector->edid) { | 365 | if (radeon_connector->edid) { |
@@ -361,9 +379,9 @@ static int radeon_ddc_dump(struct drm_connector *connector) | |||
361 | 379 | ||
362 | if (!radeon_connector->ddc_bus) | 380 | if (!radeon_connector->ddc_bus) |
363 | return -1; | 381 | return -1; |
364 | radeon_i2c_do_lock(radeon_connector, 1); | 382 | radeon_i2c_do_lock(radeon_connector->ddc_bus, 1); |
365 | edid = drm_get_edid(connector, &radeon_connector->ddc_bus->adapter); | 383 | edid = drm_get_edid(connector, &radeon_connector->ddc_bus->adapter); |
366 | radeon_i2c_do_lock(radeon_connector, 0); | 384 | radeon_i2c_do_lock(radeon_connector->ddc_bus, 0); |
367 | if (edid) { | 385 | if (edid) { |
368 | kfree(edid); | 386 | kfree(edid); |
369 | } | 387 | } |
@@ -723,6 +741,8 @@ int radeon_modeset_init(struct radeon_device *rdev) | |||
723 | if (!ret) { | 741 | if (!ret) { |
724 | return ret; | 742 | return ret; |
725 | } | 743 | } |
744 | /* initialize hpd */ | ||
745 | radeon_hpd_init(rdev); | ||
726 | drm_helper_initial_config(rdev->ddev); | 746 | drm_helper_initial_config(rdev->ddev); |
727 | return 0; | 747 | return 0; |
728 | } | 748 | } |
@@ -730,6 +750,7 @@ int radeon_modeset_init(struct radeon_device *rdev) | |||
730 | void radeon_modeset_fini(struct radeon_device *rdev) | 750 | void radeon_modeset_fini(struct radeon_device *rdev) |
731 | { | 751 | { |
732 | if (rdev->mode_info.mode_config_initialized) { | 752 | if (rdev->mode_info.mode_config_initialized) { |
753 | radeon_hpd_fini(rdev); | ||
733 | drm_mode_config_cleanup(rdev->ddev); | 754 | drm_mode_config_cleanup(rdev->ddev); |
734 | rdev->mode_info.mode_config_initialized = false; | 755 | rdev->mode_info.mode_config_initialized = false; |
735 | } | 756 | } |
@@ -750,9 +771,17 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, | |||
750 | if (encoder->crtc != crtc) | 771 | if (encoder->crtc != crtc) |
751 | continue; | 772 | continue; |
752 | if (first) { | 773 | if (first) { |
753 | radeon_crtc->rmx_type = radeon_encoder->rmx_type; | 774 | /* set scaling */ |
775 | if (radeon_encoder->rmx_type == RMX_OFF) | ||
776 | radeon_crtc->rmx_type = RMX_OFF; | ||
777 | else if (mode->hdisplay < radeon_encoder->native_mode.hdisplay || | ||
778 | mode->vdisplay < radeon_encoder->native_mode.vdisplay) | ||
779 | radeon_crtc->rmx_type = radeon_encoder->rmx_type; | ||
780 | else | ||
781 | radeon_crtc->rmx_type = RMX_OFF; | ||
782 | /* copy native mode */ | ||
754 | memcpy(&radeon_crtc->native_mode, | 783 | memcpy(&radeon_crtc->native_mode, |
755 | &radeon_encoder->native_mode, | 784 | &radeon_encoder->native_mode, |
756 | sizeof(struct drm_display_mode)); | 785 | sizeof(struct drm_display_mode)); |
757 | first = false; | 786 | first = false; |
758 | } else { | 787 | } else { |
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h index 350962e0f346..e13785282a82 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.h +++ b/drivers/gpu/drm/radeon/radeon_drv.h | |||
@@ -1104,7 +1104,6 @@ extern u32 radeon_get_scratch(drm_radeon_private_t *dev_priv, int index); | |||
1104 | # define R600_IT_WAIT_REG_MEM 0x00003C00 | 1104 | # define R600_IT_WAIT_REG_MEM 0x00003C00 |
1105 | # define R600_IT_MEM_WRITE 0x00003D00 | 1105 | # define R600_IT_MEM_WRITE 0x00003D00 |
1106 | # define R600_IT_INDIRECT_BUFFER 0x00003200 | 1106 | # define R600_IT_INDIRECT_BUFFER 0x00003200 |
1107 | # define R600_IT_CP_INTERRUPT 0x00004000 | ||
1108 | # define R600_IT_SURFACE_SYNC 0x00004300 | 1107 | # define R600_IT_SURFACE_SYNC 0x00004300 |
1109 | # define R600_CB0_DEST_BASE_ENA (1 << 6) | 1108 | # define R600_CB0_DEST_BASE_ENA (1 << 6) |
1110 | # define R600_TC_ACTION_ENA (1 << 23) | 1109 | # define R600_TC_ACTION_ENA (1 << 23) |
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index d42bc512d75a..b4f23ec93201 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c | |||
@@ -35,6 +35,51 @@ extern int atom_debug; | |||
35 | bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index, | 35 | bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index, |
36 | struct drm_display_mode *mode); | 36 | struct drm_display_mode *mode); |
37 | 37 | ||
38 | static uint32_t radeon_encoder_clones(struct drm_encoder *encoder) | ||
39 | { | ||
40 | struct drm_device *dev = encoder->dev; | ||
41 | struct radeon_device *rdev = dev->dev_private; | ||
42 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
43 | struct drm_encoder *clone_encoder; | ||
44 | uint32_t index_mask = 0; | ||
45 | int count; | ||
46 | |||
47 | /* DIG routing gets problematic */ | ||
48 | if (rdev->family >= CHIP_R600) | ||
49 | return index_mask; | ||
50 | /* LVDS/TV are too wacky */ | ||
51 | if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT) | ||
52 | return index_mask; | ||
53 | /* DVO requires 2x ppll clocks depending on tmds chip */ | ||
54 | if (radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) | ||
55 | return index_mask; | ||
56 | |||
57 | count = -1; | ||
58 | list_for_each_entry(clone_encoder, &dev->mode_config.encoder_list, head) { | ||
59 | struct radeon_encoder *radeon_clone = to_radeon_encoder(clone_encoder); | ||
60 | count++; | ||
61 | |||
62 | if (clone_encoder == encoder) | ||
63 | continue; | ||
64 | if (radeon_clone->devices & (ATOM_DEVICE_LCD_SUPPORT)) | ||
65 | continue; | ||
66 | if (radeon_clone->devices & ATOM_DEVICE_DFP2_SUPPORT) | ||
67 | continue; | ||
68 | else | ||
69 | index_mask |= (1 << count); | ||
70 | } | ||
71 | return index_mask; | ||
72 | } | ||
73 | |||
74 | void radeon_setup_encoder_clones(struct drm_device *dev) | ||
75 | { | ||
76 | struct drm_encoder *encoder; | ||
77 | |||
78 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | ||
79 | encoder->possible_clones = radeon_encoder_clones(encoder); | ||
80 | } | ||
81 | } | ||
82 | |||
38 | uint32_t | 83 | uint32_t |
39 | radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device, uint8_t dac) | 84 | radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device, uint8_t dac) |
40 | { | 85 | { |
@@ -163,29 +208,6 @@ radeon_get_connector_for_encoder(struct drm_encoder *encoder) | |||
163 | return NULL; | 208 | return NULL; |
164 | } | 209 | } |
165 | 210 | ||
166 | /* used for both atom and legacy */ | ||
167 | void radeon_rmx_mode_fixup(struct drm_encoder *encoder, | ||
168 | struct drm_display_mode *mode, | ||
169 | struct drm_display_mode *adjusted_mode) | ||
170 | { | ||
171 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
172 | struct drm_device *dev = encoder->dev; | ||
173 | struct radeon_device *rdev = dev->dev_private; | ||
174 | struct drm_display_mode *native_mode = &radeon_encoder->native_mode; | ||
175 | |||
176 | if (mode->hdisplay < native_mode->hdisplay || | ||
177 | mode->vdisplay < native_mode->vdisplay) { | ||
178 | int mode_id = adjusted_mode->base.id; | ||
179 | *adjusted_mode = *native_mode; | ||
180 | if (!ASIC_IS_AVIVO(rdev)) { | ||
181 | adjusted_mode->hdisplay = mode->hdisplay; | ||
182 | adjusted_mode->vdisplay = mode->vdisplay; | ||
183 | } | ||
184 | adjusted_mode->base.id = mode_id; | ||
185 | } | ||
186 | } | ||
187 | |||
188 | |||
189 | static bool radeon_atom_mode_fixup(struct drm_encoder *encoder, | 211 | static bool radeon_atom_mode_fixup(struct drm_encoder *encoder, |
190 | struct drm_display_mode *mode, | 212 | struct drm_display_mode *mode, |
191 | struct drm_display_mode *adjusted_mode) | 213 | struct drm_display_mode *adjusted_mode) |
@@ -198,14 +220,24 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder, | |||
198 | radeon_encoder_set_active_device(encoder); | 220 | radeon_encoder_set_active_device(encoder); |
199 | drm_mode_set_crtcinfo(adjusted_mode, 0); | 221 | drm_mode_set_crtcinfo(adjusted_mode, 0); |
200 | 222 | ||
201 | if (radeon_encoder->rmx_type != RMX_OFF) | ||
202 | radeon_rmx_mode_fixup(encoder, mode, adjusted_mode); | ||
203 | |||
204 | /* hw bug */ | 223 | /* hw bug */ |
205 | if ((mode->flags & DRM_MODE_FLAG_INTERLACE) | 224 | if ((mode->flags & DRM_MODE_FLAG_INTERLACE) |
206 | && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2))) | 225 | && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2))) |
207 | adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2; | 226 | adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2; |
208 | 227 | ||
228 | /* get the native mode for LVDS */ | ||
229 | if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) { | ||
230 | struct drm_display_mode *native_mode = &radeon_encoder->native_mode; | ||
231 | int mode_id = adjusted_mode->base.id; | ||
232 | *adjusted_mode = *native_mode; | ||
233 | if (!ASIC_IS_AVIVO(rdev)) { | ||
234 | adjusted_mode->hdisplay = mode->hdisplay; | ||
235 | adjusted_mode->vdisplay = mode->vdisplay; | ||
236 | } | ||
237 | adjusted_mode->base.id = mode_id; | ||
238 | } | ||
239 | |||
240 | /* get the native mode for TV */ | ||
209 | if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) { | 241 | if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) { |
210 | struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv; | 242 | struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv; |
211 | if (tv_dac) { | 243 | if (tv_dac) { |
@@ -218,6 +250,12 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder, | |||
218 | } | 250 | } |
219 | } | 251 | } |
220 | 252 | ||
253 | if (ASIC_IS_DCE3(rdev) && | ||
254 | (radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT))) { | ||
255 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); | ||
256 | radeon_dp_set_link_config(connector, mode); | ||
257 | } | ||
258 | |||
221 | return true; | 259 | return true; |
222 | } | 260 | } |
223 | 261 | ||
@@ -392,7 +430,7 @@ union lvds_encoder_control { | |||
392 | LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2 v2; | 430 | LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2 v2; |
393 | }; | 431 | }; |
394 | 432 | ||
395 | static void | 433 | void |
396 | atombios_digital_setup(struct drm_encoder *encoder, int action) | 434 | atombios_digital_setup(struct drm_encoder *encoder, int action) |
397 | { | 435 | { |
398 | struct drm_device *dev = encoder->dev; | 436 | struct drm_device *dev = encoder->dev; |
@@ -522,6 +560,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) | |||
522 | { | 560 | { |
523 | struct drm_connector *connector; | 561 | struct drm_connector *connector; |
524 | struct radeon_connector *radeon_connector; | 562 | struct radeon_connector *radeon_connector; |
563 | struct radeon_connector_atom_dig *radeon_dig_connector; | ||
525 | 564 | ||
526 | connector = radeon_get_connector_for_encoder(encoder); | 565 | connector = radeon_get_connector_for_encoder(encoder); |
527 | if (!connector) | 566 | if (!connector) |
@@ -551,10 +590,10 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) | |||
551 | return ATOM_ENCODER_MODE_LVDS; | 590 | return ATOM_ENCODER_MODE_LVDS; |
552 | break; | 591 | break; |
553 | case DRM_MODE_CONNECTOR_DisplayPort: | 592 | case DRM_MODE_CONNECTOR_DisplayPort: |
554 | /*if (radeon_output->MonType == MT_DP) | 593 | radeon_dig_connector = radeon_connector->con_priv; |
555 | return ATOM_ENCODER_MODE_DP; | 594 | if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) |
556 | else*/ | 595 | return ATOM_ENCODER_MODE_DP; |
557 | if (drm_detect_hdmi_monitor(radeon_connector->edid)) | 596 | else if (drm_detect_hdmi_monitor(radeon_connector->edid)) |
558 | return ATOM_ENCODER_MODE_HDMI; | 597 | return ATOM_ENCODER_MODE_HDMI; |
559 | else | 598 | else |
560 | return ATOM_ENCODER_MODE_DVI; | 599 | return ATOM_ENCODER_MODE_DVI; |
@@ -573,6 +612,30 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) | |||
573 | } | 612 | } |
574 | } | 613 | } |
575 | 614 | ||
615 | /* | ||
616 | * DIG Encoder/Transmitter Setup | ||
617 | * | ||
618 | * DCE 3.0/3.1 | ||
619 | * - 2 DIG transmitter blocks. UNIPHY (links A and B) and LVTMA. | ||
620 | * Supports up to 3 digital outputs | ||
621 | * - 2 DIG encoder blocks. | ||
622 | * DIG1 can drive UNIPHY link A or link B | ||
623 | * DIG2 can drive UNIPHY link B or LVTMA | ||
624 | * | ||
625 | * DCE 3.2 | ||
626 | * - 3 DIG transmitter blocks. UNIPHY0/1/2 (links A and B). | ||
627 | * Supports up to 5 digital outputs | ||
628 | * - 2 DIG encoder blocks. | ||
629 | * DIG1/2 can drive UNIPHY0/1/2 link A or link B | ||
630 | * | ||
631 | * Routing | ||
632 | * crtc -> dig encoder -> UNIPHY/LVTMA (1 or 2 links) | ||
633 | * Examples: | ||
634 | * crtc0 -> dig2 -> LVTMA links A+B -> TMDS/HDMI | ||
635 | * crtc1 -> dig1 -> UNIPHY0 link B -> DP | ||
636 | * crtc0 -> dig1 -> UNIPHY2 link A -> LVDS | ||
637 | * crtc1 -> dig2 -> UNIPHY1 link B+A -> TMDS/HDMI | ||
638 | */ | ||
576 | static void | 639 | static void |
577 | atombios_dig_encoder_setup(struct drm_encoder *encoder, int action) | 640 | atombios_dig_encoder_setup(struct drm_encoder *encoder, int action) |
578 | { | 641 | { |
@@ -614,10 +677,17 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action) | |||
614 | } else { | 677 | } else { |
615 | switch (radeon_encoder->encoder_id) { | 678 | switch (radeon_encoder->encoder_id) { |
616 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | 679 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: |
617 | index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl); | 680 | /* XXX doesn't really matter which dig encoder we pick as long as it's |
681 | * not already in use | ||
682 | */ | ||
683 | if (dig_connector->linkb) | ||
684 | index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl); | ||
685 | else | ||
686 | index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl); | ||
618 | num = 1; | 687 | num = 1; |
619 | break; | 688 | break; |
620 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: | 689 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: |
690 | /* Only dig2 encoder can drive LVTMA */ | ||
621 | index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl); | 691 | index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl); |
622 | num = 2; | 692 | num = 2; |
623 | break; | 693 | break; |
@@ -652,18 +722,21 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action) | |||
652 | } | 722 | } |
653 | } | 723 | } |
654 | 724 | ||
655 | if (radeon_encoder->pixel_clock > 165000) { | 725 | args.ucEncoderMode = atombios_get_encoder_mode(encoder); |
656 | args.ucConfig |= ATOM_ENCODER_CONFIG_LINKA_B; | 726 | |
727 | if (args.ucEncoderMode == ATOM_ENCODER_MODE_DP) { | ||
728 | if (dig_connector->dp_clock == 270000) | ||
729 | args.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ; | ||
730 | args.ucLaneNum = dig_connector->dp_lane_count; | ||
731 | } else if (radeon_encoder->pixel_clock > 165000) | ||
657 | args.ucLaneNum = 8; | 732 | args.ucLaneNum = 8; |
658 | } else { | 733 | else |
659 | if (dig_connector->linkb) | ||
660 | args.ucConfig |= ATOM_ENCODER_CONFIG_LINKB; | ||
661 | else | ||
662 | args.ucConfig |= ATOM_ENCODER_CONFIG_LINKA; | ||
663 | args.ucLaneNum = 4; | 734 | args.ucLaneNum = 4; |
664 | } | ||
665 | 735 | ||
666 | args.ucEncoderMode = atombios_get_encoder_mode(encoder); | 736 | if (dig_connector->linkb) |
737 | args.ucConfig |= ATOM_ENCODER_CONFIG_LINKB; | ||
738 | else | ||
739 | args.ucConfig |= ATOM_ENCODER_CONFIG_LINKA; | ||
667 | 740 | ||
668 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | 741 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
669 | 742 | ||
@@ -674,8 +747,8 @@ union dig_transmitter_control { | |||
674 | DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2; | 747 | DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2; |
675 | }; | 748 | }; |
676 | 749 | ||
677 | static void | 750 | void |
678 | atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action) | 751 | atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t lane_num, uint8_t lane_set) |
679 | { | 752 | { |
680 | struct drm_device *dev = encoder->dev; | 753 | struct drm_device *dev = encoder->dev; |
681 | struct radeon_device *rdev = dev->dev_private; | 754 | struct radeon_device *rdev = dev->dev_private; |
@@ -687,6 +760,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action) | |||
687 | struct drm_connector *connector; | 760 | struct drm_connector *connector; |
688 | struct radeon_connector *radeon_connector; | 761 | struct radeon_connector *radeon_connector; |
689 | struct radeon_connector_atom_dig *dig_connector; | 762 | struct radeon_connector_atom_dig *dig_connector; |
763 | bool is_dp = false; | ||
690 | 764 | ||
691 | connector = radeon_get_connector_for_encoder(encoder); | 765 | connector = radeon_get_connector_for_encoder(encoder); |
692 | if (!connector) | 766 | if (!connector) |
@@ -704,6 +778,9 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action) | |||
704 | 778 | ||
705 | dig_connector = radeon_connector->con_priv; | 779 | dig_connector = radeon_connector->con_priv; |
706 | 780 | ||
781 | if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) | ||
782 | is_dp = true; | ||
783 | |||
707 | memset(&args, 0, sizeof(args)); | 784 | memset(&args, 0, sizeof(args)); |
708 | 785 | ||
709 | if (ASIC_IS_DCE32(rdev)) | 786 | if (ASIC_IS_DCE32(rdev)) |
@@ -724,17 +801,23 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action) | |||
724 | args.v1.ucAction = action; | 801 | args.v1.ucAction = action; |
725 | if (action == ATOM_TRANSMITTER_ACTION_INIT) { | 802 | if (action == ATOM_TRANSMITTER_ACTION_INIT) { |
726 | args.v1.usInitInfo = radeon_connector->connector_object_id; | 803 | args.v1.usInitInfo = radeon_connector->connector_object_id; |
804 | } else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) { | ||
805 | args.v1.asMode.ucLaneSel = lane_num; | ||
806 | args.v1.asMode.ucLaneSet = lane_set; | ||
727 | } else { | 807 | } else { |
728 | if (radeon_encoder->pixel_clock > 165000) | 808 | if (is_dp) |
809 | args.v1.usPixelClock = | ||
810 | cpu_to_le16(dig_connector->dp_clock / 10); | ||
811 | else if (radeon_encoder->pixel_clock > 165000) | ||
729 | args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10); | 812 | args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10); |
730 | else | 813 | else |
731 | args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); | 814 | args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); |
732 | } | 815 | } |
733 | if (ASIC_IS_DCE32(rdev)) { | 816 | if (ASIC_IS_DCE32(rdev)) { |
734 | if (radeon_encoder->pixel_clock > 165000) | ||
735 | args.v2.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10); | ||
736 | if (dig->dig_block) | 817 | if (dig->dig_block) |
737 | args.v2.acConfig.ucEncoderSel = 1; | 818 | args.v2.acConfig.ucEncoderSel = 1; |
819 | if (dig_connector->linkb) | ||
820 | args.v2.acConfig.ucLinkSel = 1; | ||
738 | 821 | ||
739 | switch (radeon_encoder->encoder_id) { | 822 | switch (radeon_encoder->encoder_id) { |
740 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | 823 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: |
@@ -751,7 +834,9 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action) | |||
751 | break; | 834 | break; |
752 | } | 835 | } |
753 | 836 | ||
754 | if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { | 837 | if (is_dp) |
838 | args.v2.acConfig.fCoherentMode = 1; | ||
839 | else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { | ||
755 | if (dig->coherent_mode) | 840 | if (dig->coherent_mode) |
756 | args.v2.acConfig.fCoherentMode = 1; | 841 | args.v2.acConfig.fCoherentMode = 1; |
757 | } | 842 | } |
@@ -760,17 +845,20 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action) | |||
760 | 845 | ||
761 | switch (radeon_encoder->encoder_id) { | 846 | switch (radeon_encoder->encoder_id) { |
762 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | 847 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: |
763 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER; | 848 | /* XXX doesn't really matter which dig encoder we pick as long as it's |
849 | * not already in use | ||
850 | */ | ||
851 | if (dig_connector->linkb) | ||
852 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER; | ||
853 | else | ||
854 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER; | ||
764 | if (rdev->flags & RADEON_IS_IGP) { | 855 | if (rdev->flags & RADEON_IS_IGP) { |
765 | if (radeon_encoder->pixel_clock > 165000) { | 856 | if (radeon_encoder->pixel_clock > 165000) { |
766 | args.v1.ucConfig |= (ATOM_TRANSMITTER_CONFIG_8LANE_LINK | | ||
767 | ATOM_TRANSMITTER_CONFIG_LINKA_B); | ||
768 | if (dig_connector->igp_lane_info & 0x3) | 857 | if (dig_connector->igp_lane_info & 0x3) |
769 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7; | 858 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7; |
770 | else if (dig_connector->igp_lane_info & 0xc) | 859 | else if (dig_connector->igp_lane_info & 0xc) |
771 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15; | 860 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15; |
772 | } else { | 861 | } else { |
773 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA; | ||
774 | if (dig_connector->igp_lane_info & 0x1) | 862 | if (dig_connector->igp_lane_info & 0x1) |
775 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3; | 863 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3; |
776 | else if (dig_connector->igp_lane_info & 0x2) | 864 | else if (dig_connector->igp_lane_info & 0x2) |
@@ -780,35 +868,25 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action) | |||
780 | else if (dig_connector->igp_lane_info & 0x8) | 868 | else if (dig_connector->igp_lane_info & 0x8) |
781 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15; | 869 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15; |
782 | } | 870 | } |
783 | } else { | ||
784 | if (radeon_encoder->pixel_clock > 165000) | ||
785 | args.v1.ucConfig |= (ATOM_TRANSMITTER_CONFIG_8LANE_LINK | | ||
786 | ATOM_TRANSMITTER_CONFIG_LINKA_B | | ||
787 | ATOM_TRANSMITTER_CONFIG_LANE_0_7); | ||
788 | else { | ||
789 | if (dig_connector->linkb) | ||
790 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB | ATOM_TRANSMITTER_CONFIG_LANE_0_3; | ||
791 | else | ||
792 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA | ATOM_TRANSMITTER_CONFIG_LANE_0_3; | ||
793 | } | ||
794 | } | 871 | } |
795 | break; | 872 | break; |
796 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: | 873 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: |
874 | /* Only dig2 encoder can drive LVTMA */ | ||
797 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER; | 875 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER; |
798 | if (radeon_encoder->pixel_clock > 165000) | ||
799 | args.v1.ucConfig |= (ATOM_TRANSMITTER_CONFIG_8LANE_LINK | | ||
800 | ATOM_TRANSMITTER_CONFIG_LINKA_B | | ||
801 | ATOM_TRANSMITTER_CONFIG_LANE_0_7); | ||
802 | else { | ||
803 | if (dig_connector->linkb) | ||
804 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB | ATOM_TRANSMITTER_CONFIG_LANE_0_3; | ||
805 | else | ||
806 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA | ATOM_TRANSMITTER_CONFIG_LANE_0_3; | ||
807 | } | ||
808 | break; | 876 | break; |
809 | } | 877 | } |
810 | 878 | ||
811 | if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { | 879 | if (radeon_encoder->pixel_clock > 165000) |
880 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_8LANE_LINK; | ||
881 | |||
882 | if (dig_connector->linkb) | ||
883 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB; | ||
884 | else | ||
885 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA; | ||
886 | |||
887 | if (is_dp) | ||
888 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT; | ||
889 | else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { | ||
812 | if (dig->coherent_mode) | 890 | if (dig->coherent_mode) |
813 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT; | 891 | args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT; |
814 | } | 892 | } |
@@ -918,12 +996,16 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) | |||
918 | if (is_dig) { | 996 | if (is_dig) { |
919 | switch (mode) { | 997 | switch (mode) { |
920 | case DRM_MODE_DPMS_ON: | 998 | case DRM_MODE_DPMS_ON: |
921 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE); | 999 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); |
1000 | { | ||
1001 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); | ||
1002 | dp_link_train(encoder, connector); | ||
1003 | } | ||
922 | break; | 1004 | break; |
923 | case DRM_MODE_DPMS_STANDBY: | 1005 | case DRM_MODE_DPMS_STANDBY: |
924 | case DRM_MODE_DPMS_SUSPEND: | 1006 | case DRM_MODE_DPMS_SUSPEND: |
925 | case DRM_MODE_DPMS_OFF: | 1007 | case DRM_MODE_DPMS_OFF: |
926 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE); | 1008 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0); |
927 | break; | 1009 | break; |
928 | } | 1010 | } |
929 | } else { | 1011 | } else { |
@@ -1025,13 +1107,33 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder) | |||
1025 | args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID; | 1107 | args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID; |
1026 | else | 1108 | else |
1027 | args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID; | 1109 | args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID; |
1028 | } else | 1110 | } else { |
1029 | args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID; | 1111 | struct drm_connector *connector; |
1112 | struct radeon_connector *radeon_connector; | ||
1113 | struct radeon_connector_atom_dig *dig_connector; | ||
1114 | |||
1115 | connector = radeon_get_connector_for_encoder(encoder); | ||
1116 | if (!connector) | ||
1117 | return; | ||
1118 | radeon_connector = to_radeon_connector(connector); | ||
1119 | if (!radeon_connector->con_priv) | ||
1120 | return; | ||
1121 | dig_connector = radeon_connector->con_priv; | ||
1122 | |||
1123 | /* XXX doesn't really matter which dig encoder we pick as long as it's | ||
1124 | * not already in use | ||
1125 | */ | ||
1126 | if (dig_connector->linkb) | ||
1127 | args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID; | ||
1128 | else | ||
1129 | args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID; | ||
1130 | } | ||
1030 | break; | 1131 | break; |
1031 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: | 1132 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: |
1032 | args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID; | 1133 | args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID; |
1033 | break; | 1134 | break; |
1034 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: | 1135 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: |
1136 | /* Only dig2 encoder can drive LVTMA */ | ||
1035 | args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID; | 1137 | args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID; |
1036 | break; | 1138 | break; |
1037 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: | 1139 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: |
@@ -1104,11 +1206,14 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, | |||
1104 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 1206 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
1105 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); | 1207 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); |
1106 | 1208 | ||
1107 | if (radeon_encoder->enc_priv) { | 1209 | if (radeon_encoder->active_device & |
1108 | struct radeon_encoder_atom_dig *dig; | 1210 | (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) { |
1211 | if (radeon_encoder->enc_priv) { | ||
1212 | struct radeon_encoder_atom_dig *dig; | ||
1109 | 1213 | ||
1110 | dig = radeon_encoder->enc_priv; | 1214 | dig = radeon_encoder->enc_priv; |
1111 | dig->dig_block = radeon_crtc->crtc_id; | 1215 | dig->dig_block = radeon_crtc->crtc_id; |
1216 | } | ||
1112 | } | 1217 | } |
1113 | radeon_encoder->pixel_clock = adjusted_mode->clock; | 1218 | radeon_encoder->pixel_clock = adjusted_mode->clock; |
1114 | 1219 | ||
@@ -1134,14 +1239,14 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, | |||
1134 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: | 1239 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: |
1135 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: | 1240 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: |
1136 | /* disable the encoder and transmitter */ | 1241 | /* disable the encoder and transmitter */ |
1137 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE); | 1242 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); |
1138 | atombios_dig_encoder_setup(encoder, ATOM_DISABLE); | 1243 | atombios_dig_encoder_setup(encoder, ATOM_DISABLE); |
1139 | 1244 | ||
1140 | /* setup and enable the encoder and transmitter */ | 1245 | /* setup and enable the encoder and transmitter */ |
1141 | atombios_dig_encoder_setup(encoder, ATOM_ENABLE); | 1246 | atombios_dig_encoder_setup(encoder, ATOM_ENABLE); |
1142 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT); | 1247 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0); |
1143 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP); | 1248 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0); |
1144 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE); | 1249 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); |
1145 | break; | 1250 | break; |
1146 | case ENCODER_OBJECT_ID_INTERNAL_DDI: | 1251 | case ENCODER_OBJECT_ID_INTERNAL_DDI: |
1147 | atombios_ddia_setup(encoder, ATOM_ENABLE); | 1252 | atombios_ddia_setup(encoder, ATOM_ENABLE); |
@@ -1354,7 +1459,6 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su | |||
1354 | encoder->possible_crtcs = 0x1; | 1459 | encoder->possible_crtcs = 0x1; |
1355 | else | 1460 | else |
1356 | encoder->possible_crtcs = 0x3; | 1461 | encoder->possible_crtcs = 0x3; |
1357 | encoder->possible_clones = 0; | ||
1358 | 1462 | ||
1359 | radeon_encoder->enc_priv = NULL; | 1463 | radeon_encoder->enc_priv = NULL; |
1360 | 1464 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index b38c4c8e2c61..66055b3d8668 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c | |||
@@ -140,7 +140,7 @@ int radeonfb_create(struct drm_device *dev, | |||
140 | struct radeon_framebuffer *rfb; | 140 | struct radeon_framebuffer *rfb; |
141 | struct drm_mode_fb_cmd mode_cmd; | 141 | struct drm_mode_fb_cmd mode_cmd; |
142 | struct drm_gem_object *gobj = NULL; | 142 | struct drm_gem_object *gobj = NULL; |
143 | struct radeon_object *robj = NULL; | 143 | struct radeon_bo *rbo = NULL; |
144 | struct device *device = &rdev->pdev->dev; | 144 | struct device *device = &rdev->pdev->dev; |
145 | int size, aligned_size, ret; | 145 | int size, aligned_size, ret; |
146 | u64 fb_gpuaddr; | 146 | u64 fb_gpuaddr; |
@@ -168,14 +168,14 @@ int radeonfb_create(struct drm_device *dev, | |||
168 | ret = radeon_gem_object_create(rdev, aligned_size, 0, | 168 | ret = radeon_gem_object_create(rdev, aligned_size, 0, |
169 | RADEON_GEM_DOMAIN_VRAM, | 169 | RADEON_GEM_DOMAIN_VRAM, |
170 | false, ttm_bo_type_kernel, | 170 | false, ttm_bo_type_kernel, |
171 | false, &gobj); | 171 | &gobj); |
172 | if (ret) { | 172 | if (ret) { |
173 | printk(KERN_ERR "failed to allocate framebuffer (%d %d)\n", | 173 | printk(KERN_ERR "failed to allocate framebuffer (%d %d)\n", |
174 | surface_width, surface_height); | 174 | surface_width, surface_height); |
175 | ret = -ENOMEM; | 175 | ret = -ENOMEM; |
176 | goto out; | 176 | goto out; |
177 | } | 177 | } |
178 | robj = gobj->driver_private; | 178 | rbo = gobj->driver_private; |
179 | 179 | ||
180 | if (fb_tiled) | 180 | if (fb_tiled) |
181 | tiling_flags = RADEON_TILING_MACRO; | 181 | tiling_flags = RADEON_TILING_MACRO; |
@@ -192,8 +192,13 @@ int radeonfb_create(struct drm_device *dev, | |||
192 | } | 192 | } |
193 | #endif | 193 | #endif |
194 | 194 | ||
195 | if (tiling_flags) | 195 | if (tiling_flags) { |
196 | radeon_object_set_tiling_flags(robj, tiling_flags | RADEON_TILING_SURFACE, mode_cmd.pitch); | 196 | ret = radeon_bo_set_tiling_flags(rbo, |
197 | tiling_flags | RADEON_TILING_SURFACE, | ||
198 | mode_cmd.pitch); | ||
199 | if (ret) | ||
200 | dev_err(rdev->dev, "FB failed to set tiling flags\n"); | ||
201 | } | ||
197 | mutex_lock(&rdev->ddev->struct_mutex); | 202 | mutex_lock(&rdev->ddev->struct_mutex); |
198 | fb = radeon_framebuffer_create(rdev->ddev, &mode_cmd, gobj); | 203 | fb = radeon_framebuffer_create(rdev->ddev, &mode_cmd, gobj); |
199 | if (fb == NULL) { | 204 | if (fb == NULL) { |
@@ -201,10 +206,19 @@ int radeonfb_create(struct drm_device *dev, | |||
201 | ret = -ENOMEM; | 206 | ret = -ENOMEM; |
202 | goto out_unref; | 207 | goto out_unref; |
203 | } | 208 | } |
204 | ret = radeon_object_pin(robj, RADEON_GEM_DOMAIN_VRAM, &fb_gpuaddr); | 209 | ret = radeon_bo_reserve(rbo, false); |
210 | if (unlikely(ret != 0)) | ||
211 | goto out_unref; | ||
212 | ret = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_gpuaddr); | ||
213 | if (ret) { | ||
214 | radeon_bo_unreserve(rbo); | ||
215 | goto out_unref; | ||
216 | } | ||
217 | if (fb_tiled) | ||
218 | radeon_bo_check_tiling(rbo, 0, 0); | ||
219 | ret = radeon_bo_kmap(rbo, &fbptr); | ||
220 | radeon_bo_unreserve(rbo); | ||
205 | if (ret) { | 221 | if (ret) { |
206 | printk(KERN_ERR "failed to pin framebuffer\n"); | ||
207 | ret = -ENOMEM; | ||
208 | goto out_unref; | 222 | goto out_unref; |
209 | } | 223 | } |
210 | 224 | ||
@@ -213,7 +227,7 @@ int radeonfb_create(struct drm_device *dev, | |||
213 | *fb_p = fb; | 227 | *fb_p = fb; |
214 | rfb = to_radeon_framebuffer(fb); | 228 | rfb = to_radeon_framebuffer(fb); |
215 | rdev->fbdev_rfb = rfb; | 229 | rdev->fbdev_rfb = rfb; |
216 | rdev->fbdev_robj = robj; | 230 | rdev->fbdev_rbo = rbo; |
217 | 231 | ||
218 | info = framebuffer_alloc(sizeof(struct radeon_fb_device), device); | 232 | info = framebuffer_alloc(sizeof(struct radeon_fb_device), device); |
219 | if (info == NULL) { | 233 | if (info == NULL) { |
@@ -234,15 +248,7 @@ int radeonfb_create(struct drm_device *dev, | |||
234 | if (ret) | 248 | if (ret) |
235 | goto out_unref; | 249 | goto out_unref; |
236 | 250 | ||
237 | if (fb_tiled) | 251 | memset_io(fbptr, 0xff, aligned_size); |
238 | radeon_object_check_tiling(robj, 0, 0); | ||
239 | |||
240 | ret = radeon_object_kmap(robj, &fbptr); | ||
241 | if (ret) { | ||
242 | goto out_unref; | ||
243 | } | ||
244 | |||
245 | memset_io(fbptr, 0, aligned_size); | ||
246 | 252 | ||
247 | strcpy(info->fix.id, "radeondrmfb"); | 253 | strcpy(info->fix.id, "radeondrmfb"); |
248 | 254 | ||
@@ -288,8 +294,12 @@ int radeonfb_create(struct drm_device *dev, | |||
288 | return 0; | 294 | return 0; |
289 | 295 | ||
290 | out_unref: | 296 | out_unref: |
291 | if (robj) { | 297 | if (rbo) { |
292 | radeon_object_kunmap(robj); | 298 | ret = radeon_bo_reserve(rbo, false); |
299 | if (likely(ret == 0)) { | ||
300 | radeon_bo_kunmap(rbo); | ||
301 | radeon_bo_unreserve(rbo); | ||
302 | } | ||
293 | } | 303 | } |
294 | if (fb && ret) { | 304 | if (fb && ret) { |
295 | list_del(&fb->filp_head); | 305 | list_del(&fb->filp_head); |
@@ -321,14 +331,22 @@ int radeon_parse_options(char *options) | |||
321 | 331 | ||
322 | int radeonfb_probe(struct drm_device *dev) | 332 | int radeonfb_probe(struct drm_device *dev) |
323 | { | 333 | { |
324 | return drm_fb_helper_single_fb_probe(dev, 32, &radeonfb_create); | 334 | struct radeon_device *rdev = dev->dev_private; |
335 | int bpp_sel = 32; | ||
336 | |||
337 | /* select 8 bpp console on RN50 or 16MB cards */ | ||
338 | if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024)) | ||
339 | bpp_sel = 8; | ||
340 | |||
341 | return drm_fb_helper_single_fb_probe(dev, bpp_sel, &radeonfb_create); | ||
325 | } | 342 | } |
326 | 343 | ||
327 | int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb) | 344 | int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb) |
328 | { | 345 | { |
329 | struct fb_info *info; | 346 | struct fb_info *info; |
330 | struct radeon_framebuffer *rfb = to_radeon_framebuffer(fb); | 347 | struct radeon_framebuffer *rfb = to_radeon_framebuffer(fb); |
331 | struct radeon_object *robj; | 348 | struct radeon_bo *rbo; |
349 | int r; | ||
332 | 350 | ||
333 | if (!fb) { | 351 | if (!fb) { |
334 | return -EINVAL; | 352 | return -EINVAL; |
@@ -336,10 +354,14 @@ int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb) | |||
336 | info = fb->fbdev; | 354 | info = fb->fbdev; |
337 | if (info) { | 355 | if (info) { |
338 | struct radeon_fb_device *rfbdev = info->par; | 356 | struct radeon_fb_device *rfbdev = info->par; |
339 | robj = rfb->obj->driver_private; | 357 | rbo = rfb->obj->driver_private; |
340 | unregister_framebuffer(info); | 358 | unregister_framebuffer(info); |
341 | radeon_object_kunmap(robj); | 359 | r = radeon_bo_reserve(rbo, false); |
342 | radeon_object_unpin(robj); | 360 | if (likely(r == 0)) { |
361 | radeon_bo_kunmap(rbo); | ||
362 | radeon_bo_unpin(rbo); | ||
363 | radeon_bo_unreserve(rbo); | ||
364 | } | ||
343 | drm_fb_helper_free(&rfbdev->helper); | 365 | drm_fb_helper_free(&rfbdev->helper); |
344 | framebuffer_release(info); | 366 | framebuffer_release(info); |
345 | } | 367 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index 3beb26d74719..2ac31633d72c 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c | |||
@@ -168,37 +168,6 @@ bool radeon_fence_signaled(struct radeon_fence *fence) | |||
168 | return signaled; | 168 | return signaled; |
169 | } | 169 | } |
170 | 170 | ||
171 | int r600_fence_wait(struct radeon_fence *fence, bool intr, bool lazy) | ||
172 | { | ||
173 | struct radeon_device *rdev; | ||
174 | int ret = 0; | ||
175 | |||
176 | rdev = fence->rdev; | ||
177 | |||
178 | __set_current_state(intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE); | ||
179 | |||
180 | while (1) { | ||
181 | if (radeon_fence_signaled(fence)) | ||
182 | break; | ||
183 | |||
184 | if (time_after_eq(jiffies, fence->timeout)) { | ||
185 | ret = -EBUSY; | ||
186 | break; | ||
187 | } | ||
188 | |||
189 | if (lazy) | ||
190 | schedule_timeout(1); | ||
191 | |||
192 | if (intr && signal_pending(current)) { | ||
193 | ret = -ERESTARTSYS; | ||
194 | break; | ||
195 | } | ||
196 | } | ||
197 | __set_current_state(TASK_RUNNING); | ||
198 | return ret; | ||
199 | } | ||
200 | |||
201 | |||
202 | int radeon_fence_wait(struct radeon_fence *fence, bool intr) | 171 | int radeon_fence_wait(struct radeon_fence *fence, bool intr) |
203 | { | 172 | { |
204 | struct radeon_device *rdev; | 173 | struct radeon_device *rdev; |
@@ -216,13 +185,6 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr) | |||
216 | return 0; | 185 | return 0; |
217 | } | 186 | } |
218 | 187 | ||
219 | if (rdev->family >= CHIP_R600) { | ||
220 | r = r600_fence_wait(fence, intr, 0); | ||
221 | if (r == -ERESTARTSYS) | ||
222 | return -EBUSY; | ||
223 | return r; | ||
224 | } | ||
225 | |||
226 | retry: | 188 | retry: |
227 | cur_jiffies = jiffies; | 189 | cur_jiffies = jiffies; |
228 | timeout = HZ / 100; | 190 | timeout = HZ / 100; |
@@ -231,14 +193,18 @@ retry: | |||
231 | } | 193 | } |
232 | 194 | ||
233 | if (intr) { | 195 | if (intr) { |
196 | radeon_irq_kms_sw_irq_get(rdev); | ||
234 | r = wait_event_interruptible_timeout(rdev->fence_drv.queue, | 197 | r = wait_event_interruptible_timeout(rdev->fence_drv.queue, |
235 | radeon_fence_signaled(fence), timeout); | 198 | radeon_fence_signaled(fence), timeout); |
199 | radeon_irq_kms_sw_irq_put(rdev); | ||
236 | if (unlikely(r == -ERESTARTSYS)) { | 200 | if (unlikely(r == -ERESTARTSYS)) { |
237 | return -EBUSY; | 201 | return -EBUSY; |
238 | } | 202 | } |
239 | } else { | 203 | } else { |
204 | radeon_irq_kms_sw_irq_get(rdev); | ||
240 | r = wait_event_timeout(rdev->fence_drv.queue, | 205 | r = wait_event_timeout(rdev->fence_drv.queue, |
241 | radeon_fence_signaled(fence), timeout); | 206 | radeon_fence_signaled(fence), timeout); |
207 | radeon_irq_kms_sw_irq_put(rdev); | ||
242 | } | 208 | } |
243 | if (unlikely(!radeon_fence_signaled(fence))) { | 209 | if (unlikely(!radeon_fence_signaled(fence))) { |
244 | if (unlikely(r == 0)) { | 210 | if (unlikely(r == 0)) { |
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index a68d7566178c..e73d56e83fa6 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c | |||
@@ -78,11 +78,9 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev) | |||
78 | int r; | 78 | int r; |
79 | 79 | ||
80 | if (rdev->gart.table.vram.robj == NULL) { | 80 | if (rdev->gart.table.vram.robj == NULL) { |
81 | r = radeon_object_create(rdev, NULL, | 81 | r = radeon_bo_create(rdev, NULL, rdev->gart.table_size, |
82 | rdev->gart.table_size, | 82 | true, RADEON_GEM_DOMAIN_VRAM, |
83 | true, | 83 | &rdev->gart.table.vram.robj); |
84 | RADEON_GEM_DOMAIN_VRAM, | ||
85 | false, &rdev->gart.table.vram.robj); | ||
86 | if (r) { | 84 | if (r) { |
87 | return r; | 85 | return r; |
88 | } | 86 | } |
@@ -95,32 +93,38 @@ int radeon_gart_table_vram_pin(struct radeon_device *rdev) | |||
95 | uint64_t gpu_addr; | 93 | uint64_t gpu_addr; |
96 | int r; | 94 | int r; |
97 | 95 | ||
98 | r = radeon_object_pin(rdev->gart.table.vram.robj, | 96 | r = radeon_bo_reserve(rdev->gart.table.vram.robj, false); |
99 | RADEON_GEM_DOMAIN_VRAM, &gpu_addr); | 97 | if (unlikely(r != 0)) |
100 | if (r) { | ||
101 | radeon_object_unref(&rdev->gart.table.vram.robj); | ||
102 | return r; | 98 | return r; |
103 | } | 99 | r = radeon_bo_pin(rdev->gart.table.vram.robj, |
104 | r = radeon_object_kmap(rdev->gart.table.vram.robj, | 100 | RADEON_GEM_DOMAIN_VRAM, &gpu_addr); |
105 | (void **)&rdev->gart.table.vram.ptr); | ||
106 | if (r) { | 101 | if (r) { |
107 | radeon_object_unpin(rdev->gart.table.vram.robj); | 102 | radeon_bo_unreserve(rdev->gart.table.vram.robj); |
108 | radeon_object_unref(&rdev->gart.table.vram.robj); | ||
109 | DRM_ERROR("radeon: failed to map gart vram table.\n"); | ||
110 | return r; | 103 | return r; |
111 | } | 104 | } |
105 | r = radeon_bo_kmap(rdev->gart.table.vram.robj, | ||
106 | (void **)&rdev->gart.table.vram.ptr); | ||
107 | if (r) | ||
108 | radeon_bo_unpin(rdev->gart.table.vram.robj); | ||
109 | radeon_bo_unreserve(rdev->gart.table.vram.robj); | ||
112 | rdev->gart.table_addr = gpu_addr; | 110 | rdev->gart.table_addr = gpu_addr; |
113 | return 0; | 111 | return r; |
114 | } | 112 | } |
115 | 113 | ||
116 | void radeon_gart_table_vram_free(struct radeon_device *rdev) | 114 | void radeon_gart_table_vram_free(struct radeon_device *rdev) |
117 | { | 115 | { |
116 | int r; | ||
117 | |||
118 | if (rdev->gart.table.vram.robj == NULL) { | 118 | if (rdev->gart.table.vram.robj == NULL) { |
119 | return; | 119 | return; |
120 | } | 120 | } |
121 | radeon_object_kunmap(rdev->gart.table.vram.robj); | 121 | r = radeon_bo_reserve(rdev->gart.table.vram.robj, false); |
122 | radeon_object_unpin(rdev->gart.table.vram.robj); | 122 | if (likely(r == 0)) { |
123 | radeon_object_unref(&rdev->gart.table.vram.robj); | 123 | radeon_bo_kunmap(rdev->gart.table.vram.robj); |
124 | radeon_bo_unpin(rdev->gart.table.vram.robj); | ||
125 | radeon_bo_unreserve(rdev->gart.table.vram.robj); | ||
126 | } | ||
127 | radeon_bo_unref(&rdev->gart.table.vram.robj); | ||
124 | } | 128 | } |
125 | 129 | ||
126 | 130 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index d880edf254db..e927f998f76f 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c | |||
@@ -38,22 +38,21 @@ int radeon_gem_object_init(struct drm_gem_object *obj) | |||
38 | 38 | ||
39 | void radeon_gem_object_free(struct drm_gem_object *gobj) | 39 | void radeon_gem_object_free(struct drm_gem_object *gobj) |
40 | { | 40 | { |
41 | struct radeon_object *robj = gobj->driver_private; | 41 | struct radeon_bo *robj = gobj->driver_private; |
42 | 42 | ||
43 | gobj->driver_private = NULL; | 43 | gobj->driver_private = NULL; |
44 | if (robj) { | 44 | if (robj) { |
45 | radeon_object_unref(&robj); | 45 | radeon_bo_unref(&robj); |
46 | } | 46 | } |
47 | } | 47 | } |
48 | 48 | ||
49 | int radeon_gem_object_create(struct radeon_device *rdev, int size, | 49 | int radeon_gem_object_create(struct radeon_device *rdev, int size, |
50 | int alignment, int initial_domain, | 50 | int alignment, int initial_domain, |
51 | bool discardable, bool kernel, | 51 | bool discardable, bool kernel, |
52 | bool interruptible, | 52 | struct drm_gem_object **obj) |
53 | struct drm_gem_object **obj) | ||
54 | { | 53 | { |
55 | struct drm_gem_object *gobj; | 54 | struct drm_gem_object *gobj; |
56 | struct radeon_object *robj; | 55 | struct radeon_bo *robj; |
57 | int r; | 56 | int r; |
58 | 57 | ||
59 | *obj = NULL; | 58 | *obj = NULL; |
@@ -65,8 +64,7 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size, | |||
65 | if (alignment < PAGE_SIZE) { | 64 | if (alignment < PAGE_SIZE) { |
66 | alignment = PAGE_SIZE; | 65 | alignment = PAGE_SIZE; |
67 | } | 66 | } |
68 | r = radeon_object_create(rdev, gobj, size, kernel, initial_domain, | 67 | r = radeon_bo_create(rdev, gobj, size, kernel, initial_domain, &robj); |
69 | interruptible, &robj); | ||
70 | if (r) { | 68 | if (r) { |
71 | DRM_ERROR("Failed to allocate GEM object (%d, %d, %u)\n", | 69 | DRM_ERROR("Failed to allocate GEM object (%d, %d, %u)\n", |
72 | size, initial_domain, alignment); | 70 | size, initial_domain, alignment); |
@@ -83,33 +81,33 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size, | |||
83 | int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain, | 81 | int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain, |
84 | uint64_t *gpu_addr) | 82 | uint64_t *gpu_addr) |
85 | { | 83 | { |
86 | struct radeon_object *robj = obj->driver_private; | 84 | struct radeon_bo *robj = obj->driver_private; |
87 | uint32_t flags; | 85 | int r; |
88 | 86 | ||
89 | switch (pin_domain) { | 87 | r = radeon_bo_reserve(robj, false); |
90 | case RADEON_GEM_DOMAIN_VRAM: | 88 | if (unlikely(r != 0)) |
91 | flags = TTM_PL_FLAG_VRAM; | 89 | return r; |
92 | break; | 90 | r = radeon_bo_pin(robj, pin_domain, gpu_addr); |
93 | case RADEON_GEM_DOMAIN_GTT: | 91 | radeon_bo_unreserve(robj); |
94 | flags = TTM_PL_FLAG_TT; | 92 | return r; |
95 | break; | ||
96 | default: | ||
97 | flags = TTM_PL_FLAG_SYSTEM; | ||
98 | break; | ||
99 | } | ||
100 | return radeon_object_pin(robj, flags, gpu_addr); | ||
101 | } | 93 | } |
102 | 94 | ||
103 | void radeon_gem_object_unpin(struct drm_gem_object *obj) | 95 | void radeon_gem_object_unpin(struct drm_gem_object *obj) |
104 | { | 96 | { |
105 | struct radeon_object *robj = obj->driver_private; | 97 | struct radeon_bo *robj = obj->driver_private; |
106 | radeon_object_unpin(robj); | 98 | int r; |
99 | |||
100 | r = radeon_bo_reserve(robj, false); | ||
101 | if (likely(r == 0)) { | ||
102 | radeon_bo_unpin(robj); | ||
103 | radeon_bo_unreserve(robj); | ||
104 | } | ||
107 | } | 105 | } |
108 | 106 | ||
109 | int radeon_gem_set_domain(struct drm_gem_object *gobj, | 107 | int radeon_gem_set_domain(struct drm_gem_object *gobj, |
110 | uint32_t rdomain, uint32_t wdomain) | 108 | uint32_t rdomain, uint32_t wdomain) |
111 | { | 109 | { |
112 | struct radeon_object *robj; | 110 | struct radeon_bo *robj; |
113 | uint32_t domain; | 111 | uint32_t domain; |
114 | int r; | 112 | int r; |
115 | 113 | ||
@@ -127,11 +125,12 @@ int radeon_gem_set_domain(struct drm_gem_object *gobj, | |||
127 | } | 125 | } |
128 | if (domain == RADEON_GEM_DOMAIN_CPU) { | 126 | if (domain == RADEON_GEM_DOMAIN_CPU) { |
129 | /* Asking for cpu access wait for object idle */ | 127 | /* Asking for cpu access wait for object idle */ |
130 | r = radeon_object_wait(robj); | 128 | r = radeon_bo_wait(robj, NULL, false); |
131 | if (r) { | 129 | if (r) { |
132 | printk(KERN_ERR "Failed to wait for object !\n"); | 130 | printk(KERN_ERR "Failed to wait for object !\n"); |
133 | return r; | 131 | return r; |
134 | } | 132 | } |
133 | radeon_hdp_flush(robj->rdev); | ||
135 | } | 134 | } |
136 | return 0; | 135 | return 0; |
137 | } | 136 | } |
@@ -144,7 +143,7 @@ int radeon_gem_init(struct radeon_device *rdev) | |||
144 | 143 | ||
145 | void radeon_gem_fini(struct radeon_device *rdev) | 144 | void radeon_gem_fini(struct radeon_device *rdev) |
146 | { | 145 | { |
147 | radeon_object_force_delete(rdev); | 146 | radeon_bo_force_delete(rdev); |
148 | } | 147 | } |
149 | 148 | ||
150 | 149 | ||
@@ -158,9 +157,13 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data, | |||
158 | struct drm_radeon_gem_info *args = data; | 157 | struct drm_radeon_gem_info *args = data; |
159 | 158 | ||
160 | args->vram_size = rdev->mc.real_vram_size; | 159 | args->vram_size = rdev->mc.real_vram_size; |
161 | /* FIXME: report somethings that makes sense */ | 160 | args->vram_visible = rdev->mc.real_vram_size; |
162 | args->vram_visible = rdev->mc.real_vram_size - (4 * 1024 * 1024); | 161 | if (rdev->stollen_vga_memory) |
163 | args->gart_size = rdev->mc.gtt_size; | 162 | args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory); |
163 | if (rdev->fbdev_rbo) | ||
164 | args->vram_visible -= radeon_bo_size(rdev->fbdev_rbo); | ||
165 | args->gart_size = rdev->mc.gtt_size - rdev->cp.ring_size - 4096 - | ||
166 | RADEON_IB_POOL_SIZE*64*1024; | ||
164 | return 0; | 167 | return 0; |
165 | } | 168 | } |
166 | 169 | ||
@@ -192,8 +195,8 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data, | |||
192 | /* create a gem object to contain this object in */ | 195 | /* create a gem object to contain this object in */ |
193 | args->size = roundup(args->size, PAGE_SIZE); | 196 | args->size = roundup(args->size, PAGE_SIZE); |
194 | r = radeon_gem_object_create(rdev, args->size, args->alignment, | 197 | r = radeon_gem_object_create(rdev, args->size, args->alignment, |
195 | args->initial_domain, false, | 198 | args->initial_domain, false, |
196 | false, true, &gobj); | 199 | false, &gobj); |
197 | if (r) { | 200 | if (r) { |
198 | return r; | 201 | return r; |
199 | } | 202 | } |
@@ -218,7 +221,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data, | |||
218 | * just validate the BO into a certain domain */ | 221 | * just validate the BO into a certain domain */ |
219 | struct drm_radeon_gem_set_domain *args = data; | 222 | struct drm_radeon_gem_set_domain *args = data; |
220 | struct drm_gem_object *gobj; | 223 | struct drm_gem_object *gobj; |
221 | struct radeon_object *robj; | 224 | struct radeon_bo *robj; |
222 | int r; | 225 | int r; |
223 | 226 | ||
224 | /* for now if someone requests domain CPU - | 227 | /* for now if someone requests domain CPU - |
@@ -244,19 +247,18 @@ int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data, | |||
244 | { | 247 | { |
245 | struct drm_radeon_gem_mmap *args = data; | 248 | struct drm_radeon_gem_mmap *args = data; |
246 | struct drm_gem_object *gobj; | 249 | struct drm_gem_object *gobj; |
247 | struct radeon_object *robj; | 250 | struct radeon_bo *robj; |
248 | int r; | ||
249 | 251 | ||
250 | gobj = drm_gem_object_lookup(dev, filp, args->handle); | 252 | gobj = drm_gem_object_lookup(dev, filp, args->handle); |
251 | if (gobj == NULL) { | 253 | if (gobj == NULL) { |
252 | return -EINVAL; | 254 | return -EINVAL; |
253 | } | 255 | } |
254 | robj = gobj->driver_private; | 256 | robj = gobj->driver_private; |
255 | r = radeon_object_mmap(robj, &args->addr_ptr); | 257 | args->addr_ptr = radeon_bo_mmap_offset(robj); |
256 | mutex_lock(&dev->struct_mutex); | 258 | mutex_lock(&dev->struct_mutex); |
257 | drm_gem_object_unreference(gobj); | 259 | drm_gem_object_unreference(gobj); |
258 | mutex_unlock(&dev->struct_mutex); | 260 | mutex_unlock(&dev->struct_mutex); |
259 | return r; | 261 | return 0; |
260 | } | 262 | } |
261 | 263 | ||
262 | int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, | 264 | int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, |
@@ -264,7 +266,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, | |||
264 | { | 266 | { |
265 | struct drm_radeon_gem_busy *args = data; | 267 | struct drm_radeon_gem_busy *args = data; |
266 | struct drm_gem_object *gobj; | 268 | struct drm_gem_object *gobj; |
267 | struct radeon_object *robj; | 269 | struct radeon_bo *robj; |
268 | int r; | 270 | int r; |
269 | uint32_t cur_placement; | 271 | uint32_t cur_placement; |
270 | 272 | ||
@@ -273,7 +275,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, | |||
273 | return -EINVAL; | 275 | return -EINVAL; |
274 | } | 276 | } |
275 | robj = gobj->driver_private; | 277 | robj = gobj->driver_private; |
276 | r = radeon_object_busy_domain(robj, &cur_placement); | 278 | r = radeon_bo_wait(robj, &cur_placement, true); |
277 | switch (cur_placement) { | 279 | switch (cur_placement) { |
278 | case TTM_PL_VRAM: | 280 | case TTM_PL_VRAM: |
279 | args->domain = RADEON_GEM_DOMAIN_VRAM; | 281 | args->domain = RADEON_GEM_DOMAIN_VRAM; |
@@ -297,7 +299,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, | |||
297 | { | 299 | { |
298 | struct drm_radeon_gem_wait_idle *args = data; | 300 | struct drm_radeon_gem_wait_idle *args = data; |
299 | struct drm_gem_object *gobj; | 301 | struct drm_gem_object *gobj; |
300 | struct radeon_object *robj; | 302 | struct radeon_bo *robj; |
301 | int r; | 303 | int r; |
302 | 304 | ||
303 | gobj = drm_gem_object_lookup(dev, filp, args->handle); | 305 | gobj = drm_gem_object_lookup(dev, filp, args->handle); |
@@ -305,10 +307,11 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, | |||
305 | return -EINVAL; | 307 | return -EINVAL; |
306 | } | 308 | } |
307 | robj = gobj->driver_private; | 309 | robj = gobj->driver_private; |
308 | r = radeon_object_wait(robj); | 310 | r = radeon_bo_wait(robj, NULL, false); |
309 | mutex_lock(&dev->struct_mutex); | 311 | mutex_lock(&dev->struct_mutex); |
310 | drm_gem_object_unreference(gobj); | 312 | drm_gem_object_unreference(gobj); |
311 | mutex_unlock(&dev->struct_mutex); | 313 | mutex_unlock(&dev->struct_mutex); |
314 | radeon_hdp_flush(robj->rdev); | ||
312 | return r; | 315 | return r; |
313 | } | 316 | } |
314 | 317 | ||
@@ -317,7 +320,7 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data, | |||
317 | { | 320 | { |
318 | struct drm_radeon_gem_set_tiling *args = data; | 321 | struct drm_radeon_gem_set_tiling *args = data; |
319 | struct drm_gem_object *gobj; | 322 | struct drm_gem_object *gobj; |
320 | struct radeon_object *robj; | 323 | struct radeon_bo *robj; |
321 | int r = 0; | 324 | int r = 0; |
322 | 325 | ||
323 | DRM_DEBUG("%d \n", args->handle); | 326 | DRM_DEBUG("%d \n", args->handle); |
@@ -325,7 +328,7 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data, | |||
325 | if (gobj == NULL) | 328 | if (gobj == NULL) |
326 | return -EINVAL; | 329 | return -EINVAL; |
327 | robj = gobj->driver_private; | 330 | robj = gobj->driver_private; |
328 | radeon_object_set_tiling_flags(robj, args->tiling_flags, args->pitch); | 331 | r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch); |
329 | mutex_lock(&dev->struct_mutex); | 332 | mutex_lock(&dev->struct_mutex); |
330 | drm_gem_object_unreference(gobj); | 333 | drm_gem_object_unreference(gobj); |
331 | mutex_unlock(&dev->struct_mutex); | 334 | mutex_unlock(&dev->struct_mutex); |
@@ -337,16 +340,19 @@ int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data, | |||
337 | { | 340 | { |
338 | struct drm_radeon_gem_get_tiling *args = data; | 341 | struct drm_radeon_gem_get_tiling *args = data; |
339 | struct drm_gem_object *gobj; | 342 | struct drm_gem_object *gobj; |
340 | struct radeon_object *robj; | 343 | struct radeon_bo *rbo; |
341 | int r = 0; | 344 | int r = 0; |
342 | 345 | ||
343 | DRM_DEBUG("\n"); | 346 | DRM_DEBUG("\n"); |
344 | gobj = drm_gem_object_lookup(dev, filp, args->handle); | 347 | gobj = drm_gem_object_lookup(dev, filp, args->handle); |
345 | if (gobj == NULL) | 348 | if (gobj == NULL) |
346 | return -EINVAL; | 349 | return -EINVAL; |
347 | robj = gobj->driver_private; | 350 | rbo = gobj->driver_private; |
348 | radeon_object_get_tiling_flags(robj, &args->tiling_flags, | 351 | r = radeon_bo_reserve(rbo, false); |
349 | &args->pitch); | 352 | if (unlikely(r != 0)) |
353 | return r; | ||
354 | radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch); | ||
355 | radeon_bo_unreserve(rbo); | ||
350 | mutex_lock(&dev->struct_mutex); | 356 | mutex_lock(&dev->struct_mutex); |
351 | drm_gem_object_unreference(gobj); | 357 | drm_gem_object_unreference(gobj); |
352 | mutex_unlock(&dev->struct_mutex); | 358 | mutex_unlock(&dev->struct_mutex); |
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c index dd438d32e5c0..da3da1e89d00 100644 --- a/drivers/gpu/drm/radeon/radeon_i2c.c +++ b/drivers/gpu/drm/radeon/radeon_i2c.c | |||
@@ -59,35 +59,43 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector) | |||
59 | } | 59 | } |
60 | 60 | ||
61 | 61 | ||
62 | void radeon_i2c_do_lock(struct radeon_connector *radeon_connector, int lock_state) | 62 | void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state) |
63 | { | 63 | { |
64 | struct radeon_device *rdev = radeon_connector->base.dev->dev_private; | 64 | struct radeon_device *rdev = i2c->dev->dev_private; |
65 | struct radeon_i2c_bus_rec *rec = &i2c->rec; | ||
65 | uint32_t temp; | 66 | uint32_t temp; |
66 | struct radeon_i2c_bus_rec *rec = &radeon_connector->ddc_bus->rec; | ||
67 | 67 | ||
68 | /* RV410 appears to have a bug where the hw i2c in reset | 68 | /* RV410 appears to have a bug where the hw i2c in reset |
69 | * holds the i2c port in a bad state - switch hw i2c away before | 69 | * holds the i2c port in a bad state - switch hw i2c away before |
70 | * doing DDC - do this for all r200s/r300s/r400s for safety sake | 70 | * doing DDC - do this for all r200s/r300s/r400s for safety sake |
71 | */ | 71 | */ |
72 | if ((rdev->family >= CHIP_R200) && !ASIC_IS_AVIVO(rdev)) { | 72 | if (rec->hw_capable) { |
73 | if (rec->a_clk_reg == RADEON_GPIO_MONID) { | 73 | if ((rdev->family >= CHIP_R200) && !ASIC_IS_AVIVO(rdev)) { |
74 | WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST | | 74 | if (rec->a_clk_reg == RADEON_GPIO_MONID) { |
75 | R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1))); | 75 | WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST | |
76 | } else { | 76 | R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1))); |
77 | WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST | | 77 | } else { |
78 | R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3))); | 78 | WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST | |
79 | R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3))); | ||
80 | } | ||
79 | } | 81 | } |
80 | } | 82 | } |
81 | if (lock_state) { | ||
82 | temp = RREG32(rec->a_clk_reg); | ||
83 | temp &= ~(rec->a_clk_mask); | ||
84 | WREG32(rec->a_clk_reg, temp); | ||
85 | |||
86 | temp = RREG32(rec->a_data_reg); | ||
87 | temp &= ~(rec->a_data_mask); | ||
88 | WREG32(rec->a_data_reg, temp); | ||
89 | } | ||
90 | 83 | ||
84 | /* clear the output pin values */ | ||
85 | temp = RREG32(rec->a_clk_reg) & ~rec->a_clk_mask; | ||
86 | WREG32(rec->a_clk_reg, temp); | ||
87 | |||
88 | temp = RREG32(rec->a_data_reg) & ~rec->a_data_mask; | ||
89 | WREG32(rec->a_data_reg, temp); | ||
90 | |||
91 | /* set the pins to input */ | ||
92 | temp = RREG32(rec->en_clk_reg) & ~rec->en_clk_mask; | ||
93 | WREG32(rec->en_clk_reg, temp); | ||
94 | |||
95 | temp = RREG32(rec->en_data_reg) & ~rec->en_data_mask; | ||
96 | WREG32(rec->en_data_reg, temp); | ||
97 | |||
98 | /* mask the gpio pins for software use */ | ||
91 | temp = RREG32(rec->mask_clk_reg); | 99 | temp = RREG32(rec->mask_clk_reg); |
92 | if (lock_state) | 100 | if (lock_state) |
93 | temp |= rec->mask_clk_mask; | 101 | temp |= rec->mask_clk_mask; |
@@ -112,8 +120,9 @@ static int get_clock(void *i2c_priv) | |||
112 | struct radeon_i2c_bus_rec *rec = &i2c->rec; | 120 | struct radeon_i2c_bus_rec *rec = &i2c->rec; |
113 | uint32_t val; | 121 | uint32_t val; |
114 | 122 | ||
115 | val = RREG32(rec->get_clk_reg); | 123 | /* read the value off the pin */ |
116 | val &= rec->get_clk_mask; | 124 | val = RREG32(rec->y_clk_reg); |
125 | val &= rec->y_clk_mask; | ||
117 | 126 | ||
118 | return (val != 0); | 127 | return (val != 0); |
119 | } | 128 | } |
@@ -126,8 +135,10 @@ static int get_data(void *i2c_priv) | |||
126 | struct radeon_i2c_bus_rec *rec = &i2c->rec; | 135 | struct radeon_i2c_bus_rec *rec = &i2c->rec; |
127 | uint32_t val; | 136 | uint32_t val; |
128 | 137 | ||
129 | val = RREG32(rec->get_data_reg); | 138 | /* read the value off the pin */ |
130 | val &= rec->get_data_mask; | 139 | val = RREG32(rec->y_data_reg); |
140 | val &= rec->y_data_mask; | ||
141 | |||
131 | return (val != 0); | 142 | return (val != 0); |
132 | } | 143 | } |
133 | 144 | ||
@@ -138,9 +149,10 @@ static void set_clock(void *i2c_priv, int clock) | |||
138 | struct radeon_i2c_bus_rec *rec = &i2c->rec; | 149 | struct radeon_i2c_bus_rec *rec = &i2c->rec; |
139 | uint32_t val; | 150 | uint32_t val; |
140 | 151 | ||
141 | val = RREG32(rec->put_clk_reg) & (uint32_t)~(rec->put_clk_mask); | 152 | /* set pin direction */ |
142 | val |= clock ? 0 : rec->put_clk_mask; | 153 | val = RREG32(rec->en_clk_reg) & ~rec->en_clk_mask; |
143 | WREG32(rec->put_clk_reg, val); | 154 | val |= clock ? 0 : rec->en_clk_mask; |
155 | WREG32(rec->en_clk_reg, val); | ||
144 | } | 156 | } |
145 | 157 | ||
146 | static void set_data(void *i2c_priv, int data) | 158 | static void set_data(void *i2c_priv, int data) |
@@ -150,14 +162,15 @@ static void set_data(void *i2c_priv, int data) | |||
150 | struct radeon_i2c_bus_rec *rec = &i2c->rec; | 162 | struct radeon_i2c_bus_rec *rec = &i2c->rec; |
151 | uint32_t val; | 163 | uint32_t val; |
152 | 164 | ||
153 | val = RREG32(rec->put_data_reg) & (uint32_t)~(rec->put_data_mask); | 165 | /* set pin direction */ |
154 | val |= data ? 0 : rec->put_data_mask; | 166 | val = RREG32(rec->en_data_reg) & ~rec->en_data_mask; |
155 | WREG32(rec->put_data_reg, val); | 167 | val |= data ? 0 : rec->en_data_mask; |
168 | WREG32(rec->en_data_reg, val); | ||
156 | } | 169 | } |
157 | 170 | ||
158 | struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, | 171 | struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, |
159 | struct radeon_i2c_bus_rec *rec, | 172 | struct radeon_i2c_bus_rec *rec, |
160 | const char *name) | 173 | const char *name) |
161 | { | 174 | { |
162 | struct radeon_i2c_chan *i2c; | 175 | struct radeon_i2c_chan *i2c; |
163 | int ret; | 176 | int ret; |
@@ -167,20 +180,19 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, | |||
167 | return NULL; | 180 | return NULL; |
168 | 181 | ||
169 | i2c->adapter.owner = THIS_MODULE; | 182 | i2c->adapter.owner = THIS_MODULE; |
170 | i2c->adapter.algo_data = &i2c->algo; | ||
171 | i2c->dev = dev; | 183 | i2c->dev = dev; |
172 | i2c->algo.setsda = set_data; | 184 | i2c_set_adapdata(&i2c->adapter, i2c); |
173 | i2c->algo.setscl = set_clock; | 185 | i2c->adapter.algo_data = &i2c->algo.bit; |
174 | i2c->algo.getsda = get_data; | 186 | i2c->algo.bit.setsda = set_data; |
175 | i2c->algo.getscl = get_clock; | 187 | i2c->algo.bit.setscl = set_clock; |
176 | i2c->algo.udelay = 20; | 188 | i2c->algo.bit.getsda = get_data; |
189 | i2c->algo.bit.getscl = get_clock; | ||
190 | i2c->algo.bit.udelay = 20; | ||
177 | /* vesa says 2.2 ms is enough, 1 jiffy doesn't seem to always | 191 | /* vesa says 2.2 ms is enough, 1 jiffy doesn't seem to always |
178 | * make this, 2 jiffies is a lot more reliable */ | 192 | * make this, 2 jiffies is a lot more reliable */ |
179 | i2c->algo.timeout = 2; | 193 | i2c->algo.bit.timeout = 2; |
180 | i2c->algo.data = i2c; | 194 | i2c->algo.bit.data = i2c; |
181 | i2c->rec = *rec; | 195 | i2c->rec = *rec; |
182 | i2c_set_adapdata(&i2c->adapter, i2c); | ||
183 | |||
184 | ret = i2c_bit_add_bus(&i2c->adapter); | 196 | ret = i2c_bit_add_bus(&i2c->adapter); |
185 | if (ret) { | 197 | if (ret) { |
186 | DRM_INFO("Failed to register i2c %s\n", name); | 198 | DRM_INFO("Failed to register i2c %s\n", name); |
@@ -194,6 +206,38 @@ out_free: | |||
194 | 206 | ||
195 | } | 207 | } |
196 | 208 | ||
209 | struct radeon_i2c_chan *radeon_i2c_create_dp(struct drm_device *dev, | ||
210 | struct radeon_i2c_bus_rec *rec, | ||
211 | const char *name) | ||
212 | { | ||
213 | struct radeon_i2c_chan *i2c; | ||
214 | int ret; | ||
215 | |||
216 | i2c = kzalloc(sizeof(struct radeon_i2c_chan), GFP_KERNEL); | ||
217 | if (i2c == NULL) | ||
218 | return NULL; | ||
219 | |||
220 | i2c->rec = *rec; | ||
221 | i2c->adapter.owner = THIS_MODULE; | ||
222 | i2c->dev = dev; | ||
223 | i2c_set_adapdata(&i2c->adapter, i2c); | ||
224 | i2c->adapter.algo_data = &i2c->algo.dp; | ||
225 | i2c->algo.dp.aux_ch = radeon_dp_i2c_aux_ch; | ||
226 | i2c->algo.dp.address = 0; | ||
227 | ret = i2c_dp_aux_add_bus(&i2c->adapter); | ||
228 | if (ret) { | ||
229 | DRM_INFO("Failed to register i2c %s\n", name); | ||
230 | goto out_free; | ||
231 | } | ||
232 | |||
233 | return i2c; | ||
234 | out_free: | ||
235 | kfree(i2c); | ||
236 | return NULL; | ||
237 | |||
238 | } | ||
239 | |||
240 | |||
197 | void radeon_i2c_destroy(struct radeon_i2c_chan *i2c) | 241 | void radeon_i2c_destroy(struct radeon_i2c_chan *i2c) |
198 | { | 242 | { |
199 | if (!i2c) | 243 | if (!i2c) |
@@ -207,3 +251,59 @@ struct drm_encoder *radeon_best_encoder(struct drm_connector *connector) | |||
207 | { | 251 | { |
208 | return NULL; | 252 | return NULL; |
209 | } | 253 | } |
254 | |||
255 | void radeon_i2c_sw_get_byte(struct radeon_i2c_chan *i2c_bus, | ||
256 | u8 slave_addr, | ||
257 | u8 addr, | ||
258 | u8 *val) | ||
259 | { | ||
260 | u8 out_buf[2]; | ||
261 | u8 in_buf[2]; | ||
262 | struct i2c_msg msgs[] = { | ||
263 | { | ||
264 | .addr = slave_addr, | ||
265 | .flags = 0, | ||
266 | .len = 1, | ||
267 | .buf = out_buf, | ||
268 | }, | ||
269 | { | ||
270 | .addr = slave_addr, | ||
271 | .flags = I2C_M_RD, | ||
272 | .len = 1, | ||
273 | .buf = in_buf, | ||
274 | } | ||
275 | }; | ||
276 | |||
277 | out_buf[0] = addr; | ||
278 | out_buf[1] = 0; | ||
279 | |||
280 | if (i2c_transfer(&i2c_bus->adapter, msgs, 2) == 2) { | ||
281 | *val = in_buf[0]; | ||
282 | DRM_DEBUG("val = 0x%02x\n", *val); | ||
283 | } else { | ||
284 | DRM_ERROR("i2c 0x%02x 0x%02x read failed\n", | ||
285 | addr, *val); | ||
286 | } | ||
287 | } | ||
288 | |||
289 | void radeon_i2c_sw_put_byte(struct radeon_i2c_chan *i2c_bus, | ||
290 | u8 slave_addr, | ||
291 | u8 addr, | ||
292 | u8 val) | ||
293 | { | ||
294 | uint8_t out_buf[2]; | ||
295 | struct i2c_msg msg = { | ||
296 | .addr = slave_addr, | ||
297 | .flags = 0, | ||
298 | .len = 2, | ||
299 | .buf = out_buf, | ||
300 | }; | ||
301 | |||
302 | out_buf[0] = addr; | ||
303 | out_buf[1] = val; | ||
304 | |||
305 | if (i2c_transfer(&i2c_bus->adapter, &msg, 1) != 1) | ||
306 | DRM_ERROR("i2c 0x%02x 0x%02x write failed\n", | ||
307 | addr, val); | ||
308 | } | ||
309 | |||
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index a0fe6232dcb6..9223296fe37b 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c | |||
@@ -39,11 +39,32 @@ irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS) | |||
39 | return radeon_irq_process(rdev); | 39 | return radeon_irq_process(rdev); |
40 | } | 40 | } |
41 | 41 | ||
42 | /* | ||
43 | * Handle hotplug events outside the interrupt handler proper. | ||
44 | */ | ||
45 | static void radeon_hotplug_work_func(struct work_struct *work) | ||
46 | { | ||
47 | struct radeon_device *rdev = container_of(work, struct radeon_device, | ||
48 | hotplug_work); | ||
49 | struct drm_device *dev = rdev->ddev; | ||
50 | struct drm_mode_config *mode_config = &dev->mode_config; | ||
51 | struct drm_connector *connector; | ||
52 | |||
53 | if (mode_config->num_connector) { | ||
54 | list_for_each_entry(connector, &mode_config->connector_list, head) | ||
55 | radeon_connector_hotplug(connector); | ||
56 | } | ||
57 | /* Just fire off a uevent and let userspace tell us what to do */ | ||
58 | drm_sysfs_hotplug_event(dev); | ||
59 | } | ||
60 | |||
42 | void radeon_driver_irq_preinstall_kms(struct drm_device *dev) | 61 | void radeon_driver_irq_preinstall_kms(struct drm_device *dev) |
43 | { | 62 | { |
44 | struct radeon_device *rdev = dev->dev_private; | 63 | struct radeon_device *rdev = dev->dev_private; |
45 | unsigned i; | 64 | unsigned i; |
46 | 65 | ||
66 | INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func); | ||
67 | |||
47 | /* Disable *all* interrupts */ | 68 | /* Disable *all* interrupts */ |
48 | rdev->irq.sw_int = false; | 69 | rdev->irq.sw_int = false; |
49 | for (i = 0; i < 2; i++) { | 70 | for (i = 0; i < 2; i++) { |
@@ -87,17 +108,25 @@ int radeon_irq_kms_init(struct radeon_device *rdev) | |||
87 | 108 | ||
88 | if (rdev->flags & RADEON_SINGLE_CRTC) | 109 | if (rdev->flags & RADEON_SINGLE_CRTC) |
89 | num_crtc = 1; | 110 | num_crtc = 1; |
90 | 111 | spin_lock_init(&rdev->irq.sw_lock); | |
91 | r = drm_vblank_init(rdev->ddev, num_crtc); | 112 | r = drm_vblank_init(rdev->ddev, num_crtc); |
92 | if (r) { | 113 | if (r) { |
93 | return r; | 114 | return r; |
94 | } | 115 | } |
95 | /* enable msi */ | 116 | /* enable msi */ |
96 | rdev->msi_enabled = 0; | 117 | rdev->msi_enabled = 0; |
97 | if (rdev->family >= CHIP_RV380) { | 118 | /* MSIs don't seem to work on my rs780; |
119 | * not sure about rs880 or other rs780s. | ||
120 | * Needs more investigation. | ||
121 | */ | ||
122 | if ((rdev->family >= CHIP_RV380) && | ||
123 | (rdev->family != CHIP_RS780) && | ||
124 | (rdev->family != CHIP_RS880)) { | ||
98 | int ret = pci_enable_msi(rdev->pdev); | 125 | int ret = pci_enable_msi(rdev->pdev); |
99 | if (!ret) | 126 | if (!ret) { |
100 | rdev->msi_enabled = 1; | 127 | rdev->msi_enabled = 1; |
128 | DRM_INFO("radeon: using MSI.\n"); | ||
129 | } | ||
101 | } | 130 | } |
102 | drm_irq_install(rdev->ddev); | 131 | drm_irq_install(rdev->ddev); |
103 | rdev->irq.installed = true; | 132 | rdev->irq.installed = true; |
@@ -114,3 +143,29 @@ void radeon_irq_kms_fini(struct radeon_device *rdev) | |||
114 | pci_disable_msi(rdev->pdev); | 143 | pci_disable_msi(rdev->pdev); |
115 | } | 144 | } |
116 | } | 145 | } |
146 | |||
147 | void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev) | ||
148 | { | ||
149 | unsigned long irqflags; | ||
150 | |||
151 | spin_lock_irqsave(&rdev->irq.sw_lock, irqflags); | ||
152 | if (rdev->ddev->irq_enabled && (++rdev->irq.sw_refcount == 1)) { | ||
153 | rdev->irq.sw_int = true; | ||
154 | radeon_irq_set(rdev); | ||
155 | } | ||
156 | spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags); | ||
157 | } | ||
158 | |||
159 | void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev) | ||
160 | { | ||
161 | unsigned long irqflags; | ||
162 | |||
163 | spin_lock_irqsave(&rdev->irq.sw_lock, irqflags); | ||
164 | BUG_ON(rdev->ddev->irq_enabled && rdev->irq.sw_refcount <= 0); | ||
165 | if (rdev->ddev->irq_enabled && (--rdev->irq.sw_refcount == 0)) { | ||
166 | rdev->irq.sw_int = false; | ||
167 | radeon_irq_set(rdev); | ||
168 | } | ||
169 | spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags); | ||
170 | } | ||
171 | |||
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index 8d0b7aa87fa4..b82ede98e152 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c | |||
@@ -30,6 +30,18 @@ | |||
30 | #include "radeon.h" | 30 | #include "radeon.h" |
31 | #include "atom.h" | 31 | #include "atom.h" |
32 | 32 | ||
33 | static void radeon_overscan_setup(struct drm_crtc *crtc, | ||
34 | struct drm_display_mode *mode) | ||
35 | { | ||
36 | struct drm_device *dev = crtc->dev; | ||
37 | struct radeon_device *rdev = dev->dev_private; | ||
38 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | ||
39 | |||
40 | WREG32(RADEON_OVR_CLR + radeon_crtc->crtc_offset, 0); | ||
41 | WREG32(RADEON_OVR_WID_LEFT_RIGHT + radeon_crtc->crtc_offset, 0); | ||
42 | WREG32(RADEON_OVR_WID_TOP_BOTTOM + radeon_crtc->crtc_offset, 0); | ||
43 | } | ||
44 | |||
33 | static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc, | 45 | static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc, |
34 | struct drm_display_mode *mode, | 46 | struct drm_display_mode *mode, |
35 | struct drm_display_mode *adjusted_mode) | 47 | struct drm_display_mode *adjusted_mode) |
@@ -292,8 +304,7 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
292 | uint32_t mask; | 304 | uint32_t mask; |
293 | 305 | ||
294 | if (radeon_crtc->crtc_id) | 306 | if (radeon_crtc->crtc_id) |
295 | mask = (RADEON_CRTC2_EN | | 307 | mask = (RADEON_CRTC2_DISP_DIS | |
296 | RADEON_CRTC2_DISP_DIS | | ||
297 | RADEON_CRTC2_VSYNC_DIS | | 308 | RADEON_CRTC2_VSYNC_DIS | |
298 | RADEON_CRTC2_HSYNC_DIS | | 309 | RADEON_CRTC2_HSYNC_DIS | |
299 | RADEON_CRTC2_DISP_REQ_EN_B); | 310 | RADEON_CRTC2_DISP_REQ_EN_B); |
@@ -305,7 +316,7 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
305 | switch (mode) { | 316 | switch (mode) { |
306 | case DRM_MODE_DPMS_ON: | 317 | case DRM_MODE_DPMS_ON: |
307 | if (radeon_crtc->crtc_id) | 318 | if (radeon_crtc->crtc_id) |
308 | WREG32_P(RADEON_CRTC2_GEN_CNTL, RADEON_CRTC2_EN, ~mask); | 319 | WREG32_P(RADEON_CRTC2_GEN_CNTL, RADEON_CRTC2_EN, ~(RADEON_CRTC2_EN | mask)); |
309 | else { | 320 | else { |
310 | WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_EN, ~(RADEON_CRTC_EN | | 321 | WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_EN, ~(RADEON_CRTC_EN | |
311 | RADEON_CRTC_DISP_REQ_EN_B)); | 322 | RADEON_CRTC_DISP_REQ_EN_B)); |
@@ -319,7 +330,7 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
319 | case DRM_MODE_DPMS_OFF: | 330 | case DRM_MODE_DPMS_OFF: |
320 | drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id); | 331 | drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id); |
321 | if (radeon_crtc->crtc_id) | 332 | if (radeon_crtc->crtc_id) |
322 | WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~mask); | 333 | WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~(RADEON_CRTC2_EN | mask)); |
323 | else { | 334 | else { |
324 | WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_DISP_REQ_EN_B, ~(RADEON_CRTC_EN | | 335 | WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_DISP_REQ_EN_B, ~(RADEON_CRTC_EN | |
325 | RADEON_CRTC_DISP_REQ_EN_B)); | 336 | RADEON_CRTC_DISP_REQ_EN_B)); |
@@ -400,14 +411,21 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, | |||
400 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); | 411 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
401 | struct radeon_framebuffer *radeon_fb; | 412 | struct radeon_framebuffer *radeon_fb; |
402 | struct drm_gem_object *obj; | 413 | struct drm_gem_object *obj; |
414 | struct radeon_bo *rbo; | ||
403 | uint64_t base; | 415 | uint64_t base; |
404 | uint32_t crtc_offset, crtc_offset_cntl, crtc_tile_x0_y0 = 0; | 416 | uint32_t crtc_offset, crtc_offset_cntl, crtc_tile_x0_y0 = 0; |
405 | uint32_t crtc_pitch, pitch_pixels; | 417 | uint32_t crtc_pitch, pitch_pixels; |
406 | uint32_t tiling_flags; | 418 | uint32_t tiling_flags; |
407 | int format; | 419 | int format; |
408 | uint32_t gen_cntl_reg, gen_cntl_val; | 420 | uint32_t gen_cntl_reg, gen_cntl_val; |
421 | int r; | ||
409 | 422 | ||
410 | DRM_DEBUG("\n"); | 423 | DRM_DEBUG("\n"); |
424 | /* no fb bound */ | ||
425 | if (!crtc->fb) { | ||
426 | DRM_DEBUG("No FB bound\n"); | ||
427 | return 0; | ||
428 | } | ||
411 | 429 | ||
412 | radeon_fb = to_radeon_framebuffer(crtc->fb); | 430 | radeon_fb = to_radeon_framebuffer(crtc->fb); |
413 | 431 | ||
@@ -431,10 +449,22 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, | |||
431 | return false; | 449 | return false; |
432 | } | 450 | } |
433 | 451 | ||
452 | /* Pin framebuffer & get tilling informations */ | ||
434 | obj = radeon_fb->obj; | 453 | obj = radeon_fb->obj; |
435 | if (radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &base)) { | 454 | rbo = obj->driver_private; |
455 | r = radeon_bo_reserve(rbo, false); | ||
456 | if (unlikely(r != 0)) | ||
457 | return r; | ||
458 | r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &base); | ||
459 | if (unlikely(r != 0)) { | ||
460 | radeon_bo_unreserve(rbo); | ||
436 | return -EINVAL; | 461 | return -EINVAL; |
437 | } | 462 | } |
463 | radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL); | ||
464 | radeon_bo_unreserve(rbo); | ||
465 | if (tiling_flags & RADEON_TILING_MICRO) | ||
466 | DRM_ERROR("trying to scanout microtiled buffer\n"); | ||
467 | |||
438 | /* if scanout was in GTT this really wouldn't work */ | 468 | /* if scanout was in GTT this really wouldn't work */ |
439 | /* crtc offset is from display base addr not FB location */ | 469 | /* crtc offset is from display base addr not FB location */ |
440 | radeon_crtc->legacy_display_base_addr = rdev->mc.vram_location; | 470 | radeon_crtc->legacy_display_base_addr = rdev->mc.vram_location; |
@@ -449,10 +479,6 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, | |||
449 | (crtc->fb->bits_per_pixel * 8)); | 479 | (crtc->fb->bits_per_pixel * 8)); |
450 | crtc_pitch |= crtc_pitch << 16; | 480 | crtc_pitch |= crtc_pitch << 16; |
451 | 481 | ||
452 | radeon_object_get_tiling_flags(obj->driver_private, | ||
453 | &tiling_flags, NULL); | ||
454 | if (tiling_flags & RADEON_TILING_MICRO) | ||
455 | DRM_ERROR("trying to scanout microtiled buffer\n"); | ||
456 | 482 | ||
457 | if (tiling_flags & RADEON_TILING_MACRO) { | 483 | if (tiling_flags & RADEON_TILING_MACRO) { |
458 | if (ASIC_IS_R300(rdev)) | 484 | if (ASIC_IS_R300(rdev)) |
@@ -530,7 +556,12 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, | |||
530 | 556 | ||
531 | if (old_fb && old_fb != crtc->fb) { | 557 | if (old_fb && old_fb != crtc->fb) { |
532 | radeon_fb = to_radeon_framebuffer(old_fb); | 558 | radeon_fb = to_radeon_framebuffer(old_fb); |
533 | radeon_gem_object_unpin(radeon_fb->obj); | 559 | rbo = radeon_fb->obj->driver_private; |
560 | r = radeon_bo_reserve(rbo, false); | ||
561 | if (unlikely(r != 0)) | ||
562 | return r; | ||
563 | radeon_bo_unpin(rbo); | ||
564 | radeon_bo_unreserve(rbo); | ||
534 | } | 565 | } |
535 | 566 | ||
536 | /* Bytes per pixel may have changed */ | 567 | /* Bytes per pixel may have changed */ |
@@ -642,12 +673,8 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod | |||
642 | uint32_t crtc2_gen_cntl; | 673 | uint32_t crtc2_gen_cntl; |
643 | uint32_t disp2_merge_cntl; | 674 | uint32_t disp2_merge_cntl; |
644 | 675 | ||
645 | /* check to see if TV DAC is enabled for another crtc and keep it enabled */ | 676 | /* if TV DAC is enabled for another crtc and keep it enabled */ |
646 | if (RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_CRT2_ON) | 677 | crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL) & 0x00718080; |
647 | crtc2_gen_cntl = RADEON_CRTC2_CRT2_ON; | ||
648 | else | ||
649 | crtc2_gen_cntl = 0; | ||
650 | |||
651 | crtc2_gen_cntl |= ((format << 8) | 678 | crtc2_gen_cntl |= ((format << 8) |
652 | | RADEON_CRTC2_VSYNC_DIS | 679 | | RADEON_CRTC2_VSYNC_DIS |
653 | | RADEON_CRTC2_HSYNC_DIS | 680 | | RADEON_CRTC2_HSYNC_DIS |
@@ -676,7 +703,8 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod | |||
676 | uint32_t crtc_ext_cntl; | 703 | uint32_t crtc_ext_cntl; |
677 | uint32_t disp_merge_cntl; | 704 | uint32_t disp_merge_cntl; |
678 | 705 | ||
679 | crtc_gen_cntl = (RADEON_CRTC_EXT_DISP_EN | 706 | crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL) & 0x00718000; |
707 | crtc_gen_cntl |= (RADEON_CRTC_EXT_DISP_EN | ||
680 | | (format << 8) | 708 | | (format << 8) |
681 | | RADEON_CRTC_DISP_REQ_EN_B | 709 | | RADEON_CRTC_DISP_REQ_EN_B |
682 | | ((mode->flags & DRM_MODE_FLAG_DBLSCAN) | 710 | | ((mode->flags & DRM_MODE_FLAG_DBLSCAN) |
@@ -779,15 +807,17 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) | |||
779 | if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) | 807 | if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) |
780 | pll_flags |= RADEON_PLL_NO_ODD_POST_DIV; | 808 | pll_flags |= RADEON_PLL_NO_ODD_POST_DIV; |
781 | if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) { | 809 | if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) { |
782 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 810 | if (!rdev->is_atom_bios) { |
783 | struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv; | 811 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
784 | if (lvds) { | 812 | struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv; |
785 | if (lvds->use_bios_dividers) { | 813 | if (lvds) { |
786 | pll_ref_div = lvds->panel_ref_divider; | 814 | if (lvds->use_bios_dividers) { |
787 | pll_fb_post_div = (lvds->panel_fb_divider | | 815 | pll_ref_div = lvds->panel_ref_divider; |
788 | (lvds->panel_post_divider << 16)); | 816 | pll_fb_post_div = (lvds->panel_fb_divider | |
789 | htotal_cntl = 0; | 817 | (lvds->panel_post_divider << 16)); |
790 | use_bios_divs = true; | 818 | htotal_cntl = 0; |
819 | use_bios_divs = true; | ||
820 | } | ||
791 | } | 821 | } |
792 | } | 822 | } |
793 | pll_flags |= RADEON_PLL_USE_REF_DIV; | 823 | pll_flags |= RADEON_PLL_USE_REF_DIV; |
@@ -1027,6 +1057,7 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc, | |||
1027 | radeon_crtc_set_base(crtc, x, y, old_fb); | 1057 | radeon_crtc_set_base(crtc, x, y, old_fb); |
1028 | radeon_set_crtc_timing(crtc, adjusted_mode); | 1058 | radeon_set_crtc_timing(crtc, adjusted_mode); |
1029 | radeon_set_pll(crtc, adjusted_mode); | 1059 | radeon_set_pll(crtc, adjusted_mode); |
1060 | radeon_overscan_setup(crtc, adjusted_mode); | ||
1030 | if (radeon_crtc->crtc_id == 0) { | 1061 | if (radeon_crtc->crtc_id == 0) { |
1031 | radeon_legacy_rmx_mode_set(crtc, mode, adjusted_mode); | 1062 | radeon_legacy_rmx_mode_set(crtc, mode, adjusted_mode); |
1032 | } else { | 1063 | } else { |
@@ -1042,12 +1073,29 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc, | |||
1042 | 1073 | ||
1043 | static void radeon_crtc_prepare(struct drm_crtc *crtc) | 1074 | static void radeon_crtc_prepare(struct drm_crtc *crtc) |
1044 | { | 1075 | { |
1045 | radeon_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); | 1076 | struct drm_device *dev = crtc->dev; |
1077 | struct drm_crtc *crtci; | ||
1078 | |||
1079 | /* | ||
1080 | * The hardware wedges sometimes if you reconfigure one CRTC | ||
1081 | * whilst another is running (see fdo bug #24611). | ||
1082 | */ | ||
1083 | list_for_each_entry(crtci, &dev->mode_config.crtc_list, head) | ||
1084 | radeon_crtc_dpms(crtci, DRM_MODE_DPMS_OFF); | ||
1046 | } | 1085 | } |
1047 | 1086 | ||
1048 | static void radeon_crtc_commit(struct drm_crtc *crtc) | 1087 | static void radeon_crtc_commit(struct drm_crtc *crtc) |
1049 | { | 1088 | { |
1050 | radeon_crtc_dpms(crtc, DRM_MODE_DPMS_ON); | 1089 | struct drm_device *dev = crtc->dev; |
1090 | struct drm_crtc *crtci; | ||
1091 | |||
1092 | /* | ||
1093 | * Reenable the CRTCs that should be running. | ||
1094 | */ | ||
1095 | list_for_each_entry(crtci, &dev->mode_config.crtc_list, head) { | ||
1096 | if (crtci->enabled) | ||
1097 | radeon_crtc_dpms(crtci, DRM_MODE_DPMS_ON); | ||
1098 | } | ||
1051 | } | 1099 | } |
1052 | 1100 | ||
1053 | static const struct drm_crtc_helper_funcs legacy_helper_funcs = { | 1101 | static const struct drm_crtc_helper_funcs legacy_helper_funcs = { |
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c index 00382122869b..df00515e81fa 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c | |||
@@ -136,7 +136,14 @@ static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder, | |||
136 | lvds_pll_cntl &= ~RADEON_LVDS_PLL_EN; | 136 | lvds_pll_cntl &= ~RADEON_LVDS_PLL_EN; |
137 | 137 | ||
138 | lvds_ss_gen_cntl = RREG32(RADEON_LVDS_SS_GEN_CNTL); | 138 | lvds_ss_gen_cntl = RREG32(RADEON_LVDS_SS_GEN_CNTL); |
139 | if ((!rdev->is_atom_bios)) { | 139 | if (rdev->is_atom_bios) { |
140 | /* LVDS_GEN_CNTL parameters are computed in LVDSEncoderControl | ||
141 | * need to call that on resume to set up the reg properly. | ||
142 | */ | ||
143 | radeon_encoder->pixel_clock = adjusted_mode->clock; | ||
144 | atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_ENABLE); | ||
145 | lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL); | ||
146 | } else { | ||
140 | struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv; | 147 | struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv; |
141 | if (lvds) { | 148 | if (lvds) { |
142 | DRM_DEBUG("bios LVDS_GEN_CNTL: 0x%x\n", lvds->lvds_gen_cntl); | 149 | DRM_DEBUG("bios LVDS_GEN_CNTL: 0x%x\n", lvds->lvds_gen_cntl); |
@@ -147,8 +154,7 @@ static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder, | |||
147 | (lvds->panel_blon_delay << RADEON_LVDS_PWRSEQ_DELAY2_SHIFT)); | 154 | (lvds->panel_blon_delay << RADEON_LVDS_PWRSEQ_DELAY2_SHIFT)); |
148 | } else | 155 | } else |
149 | lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL); | 156 | lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL); |
150 | } else | 157 | } |
151 | lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL); | ||
152 | lvds_gen_cntl |= RADEON_LVDS_DISPLAY_DIS; | 158 | lvds_gen_cntl |= RADEON_LVDS_DISPLAY_DIS; |
153 | lvds_gen_cntl &= ~(RADEON_LVDS_ON | | 159 | lvds_gen_cntl &= ~(RADEON_LVDS_ON | |
154 | RADEON_LVDS_BLON | | 160 | RADEON_LVDS_BLON | |
@@ -184,9 +190,9 @@ static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder, | |||
184 | radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); | 190 | radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); |
185 | } | 191 | } |
186 | 192 | ||
187 | static bool radeon_legacy_lvds_mode_fixup(struct drm_encoder *encoder, | 193 | static bool radeon_legacy_mode_fixup(struct drm_encoder *encoder, |
188 | struct drm_display_mode *mode, | 194 | struct drm_display_mode *mode, |
189 | struct drm_display_mode *adjusted_mode) | 195 | struct drm_display_mode *adjusted_mode) |
190 | { | 196 | { |
191 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 197 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
192 | 198 | ||
@@ -194,15 +200,22 @@ static bool radeon_legacy_lvds_mode_fixup(struct drm_encoder *encoder, | |||
194 | radeon_encoder_set_active_device(encoder); | 200 | radeon_encoder_set_active_device(encoder); |
195 | drm_mode_set_crtcinfo(adjusted_mode, 0); | 201 | drm_mode_set_crtcinfo(adjusted_mode, 0); |
196 | 202 | ||
197 | if (radeon_encoder->rmx_type != RMX_OFF) | 203 | /* get the native mode for LVDS */ |
198 | radeon_rmx_mode_fixup(encoder, mode, adjusted_mode); | 204 | if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) { |
205 | struct drm_display_mode *native_mode = &radeon_encoder->native_mode; | ||
206 | int mode_id = adjusted_mode->base.id; | ||
207 | *adjusted_mode = *native_mode; | ||
208 | adjusted_mode->hdisplay = mode->hdisplay; | ||
209 | adjusted_mode->vdisplay = mode->vdisplay; | ||
210 | adjusted_mode->base.id = mode_id; | ||
211 | } | ||
199 | 212 | ||
200 | return true; | 213 | return true; |
201 | } | 214 | } |
202 | 215 | ||
203 | static const struct drm_encoder_helper_funcs radeon_legacy_lvds_helper_funcs = { | 216 | static const struct drm_encoder_helper_funcs radeon_legacy_lvds_helper_funcs = { |
204 | .dpms = radeon_legacy_lvds_dpms, | 217 | .dpms = radeon_legacy_lvds_dpms, |
205 | .mode_fixup = radeon_legacy_lvds_mode_fixup, | 218 | .mode_fixup = radeon_legacy_mode_fixup, |
206 | .prepare = radeon_legacy_lvds_prepare, | 219 | .prepare = radeon_legacy_lvds_prepare, |
207 | .mode_set = radeon_legacy_lvds_mode_set, | 220 | .mode_set = radeon_legacy_lvds_mode_set, |
208 | .commit = radeon_legacy_lvds_commit, | 221 | .commit = radeon_legacy_lvds_commit, |
@@ -214,17 +227,6 @@ static const struct drm_encoder_funcs radeon_legacy_lvds_enc_funcs = { | |||
214 | .destroy = radeon_enc_destroy, | 227 | .destroy = radeon_enc_destroy, |
215 | }; | 228 | }; |
216 | 229 | ||
217 | static bool radeon_legacy_primary_dac_mode_fixup(struct drm_encoder *encoder, | ||
218 | struct drm_display_mode *mode, | ||
219 | struct drm_display_mode *adjusted_mode) | ||
220 | { | ||
221 | /* set the active encoder to connector routing */ | ||
222 | radeon_encoder_set_active_device(encoder); | ||
223 | drm_mode_set_crtcinfo(adjusted_mode, 0); | ||
224 | |||
225 | return true; | ||
226 | } | ||
227 | |||
228 | static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode) | 230 | static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode) |
229 | { | 231 | { |
230 | struct drm_device *dev = encoder->dev; | 232 | struct drm_device *dev = encoder->dev; |
@@ -410,7 +412,7 @@ static enum drm_connector_status radeon_legacy_primary_dac_detect(struct drm_enc | |||
410 | 412 | ||
411 | static const struct drm_encoder_helper_funcs radeon_legacy_primary_dac_helper_funcs = { | 413 | static const struct drm_encoder_helper_funcs radeon_legacy_primary_dac_helper_funcs = { |
412 | .dpms = radeon_legacy_primary_dac_dpms, | 414 | .dpms = radeon_legacy_primary_dac_dpms, |
413 | .mode_fixup = radeon_legacy_primary_dac_mode_fixup, | 415 | .mode_fixup = radeon_legacy_mode_fixup, |
414 | .prepare = radeon_legacy_primary_dac_prepare, | 416 | .prepare = radeon_legacy_primary_dac_prepare, |
415 | .mode_set = radeon_legacy_primary_dac_mode_set, | 417 | .mode_set = radeon_legacy_primary_dac_mode_set, |
416 | .commit = radeon_legacy_primary_dac_commit, | 418 | .commit = radeon_legacy_primary_dac_commit, |
@@ -423,16 +425,6 @@ static const struct drm_encoder_funcs radeon_legacy_primary_dac_enc_funcs = { | |||
423 | .destroy = radeon_enc_destroy, | 425 | .destroy = radeon_enc_destroy, |
424 | }; | 426 | }; |
425 | 427 | ||
426 | static bool radeon_legacy_tmds_int_mode_fixup(struct drm_encoder *encoder, | ||
427 | struct drm_display_mode *mode, | ||
428 | struct drm_display_mode *adjusted_mode) | ||
429 | { | ||
430 | |||
431 | drm_mode_set_crtcinfo(adjusted_mode, 0); | ||
432 | |||
433 | return true; | ||
434 | } | ||
435 | |||
436 | static void radeon_legacy_tmds_int_dpms(struct drm_encoder *encoder, int mode) | 428 | static void radeon_legacy_tmds_int_dpms(struct drm_encoder *encoder, int mode) |
437 | { | 429 | { |
438 | struct drm_device *dev = encoder->dev; | 430 | struct drm_device *dev = encoder->dev; |
@@ -584,7 +576,7 @@ static void radeon_legacy_tmds_int_mode_set(struct drm_encoder *encoder, | |||
584 | 576 | ||
585 | static const struct drm_encoder_helper_funcs radeon_legacy_tmds_int_helper_funcs = { | 577 | static const struct drm_encoder_helper_funcs radeon_legacy_tmds_int_helper_funcs = { |
586 | .dpms = radeon_legacy_tmds_int_dpms, | 578 | .dpms = radeon_legacy_tmds_int_dpms, |
587 | .mode_fixup = radeon_legacy_tmds_int_mode_fixup, | 579 | .mode_fixup = radeon_legacy_mode_fixup, |
588 | .prepare = radeon_legacy_tmds_int_prepare, | 580 | .prepare = radeon_legacy_tmds_int_prepare, |
589 | .mode_set = radeon_legacy_tmds_int_mode_set, | 581 | .mode_set = radeon_legacy_tmds_int_mode_set, |
590 | .commit = radeon_legacy_tmds_int_commit, | 582 | .commit = radeon_legacy_tmds_int_commit, |
@@ -596,17 +588,6 @@ static const struct drm_encoder_funcs radeon_legacy_tmds_int_enc_funcs = { | |||
596 | .destroy = radeon_enc_destroy, | 588 | .destroy = radeon_enc_destroy, |
597 | }; | 589 | }; |
598 | 590 | ||
599 | static bool radeon_legacy_tmds_ext_mode_fixup(struct drm_encoder *encoder, | ||
600 | struct drm_display_mode *mode, | ||
601 | struct drm_display_mode *adjusted_mode) | ||
602 | { | ||
603 | /* set the active encoder to connector routing */ | ||
604 | radeon_encoder_set_active_device(encoder); | ||
605 | drm_mode_set_crtcinfo(adjusted_mode, 0); | ||
606 | |||
607 | return true; | ||
608 | } | ||
609 | |||
610 | static void radeon_legacy_tmds_ext_dpms(struct drm_encoder *encoder, int mode) | 591 | static void radeon_legacy_tmds_ext_dpms(struct drm_encoder *encoder, int mode) |
611 | { | 592 | { |
612 | struct drm_device *dev = encoder->dev; | 593 | struct drm_device *dev = encoder->dev; |
@@ -697,6 +678,8 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder, | |||
697 | /*if (mode->clock > 165000) | 678 | /*if (mode->clock > 165000) |
698 | fp2_gen_cntl |= R300_FP2_DVO_DUAL_CHANNEL_EN;*/ | 679 | fp2_gen_cntl |= R300_FP2_DVO_DUAL_CHANNEL_EN;*/ |
699 | } | 680 | } |
681 | if (!radeon_combios_external_tmds_setup(encoder)) | ||
682 | radeon_external_tmds_setup(encoder); | ||
700 | } | 683 | } |
701 | 684 | ||
702 | if (radeon_crtc->crtc_id == 0) { | 685 | if (radeon_crtc->crtc_id == 0) { |
@@ -724,9 +707,22 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder, | |||
724 | radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); | 707 | radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); |
725 | } | 708 | } |
726 | 709 | ||
710 | static void radeon_ext_tmds_enc_destroy(struct drm_encoder *encoder) | ||
711 | { | ||
712 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | ||
713 | struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv; | ||
714 | if (tmds) { | ||
715 | if (tmds->i2c_bus) | ||
716 | radeon_i2c_destroy(tmds->i2c_bus); | ||
717 | } | ||
718 | kfree(radeon_encoder->enc_priv); | ||
719 | drm_encoder_cleanup(encoder); | ||
720 | kfree(radeon_encoder); | ||
721 | } | ||
722 | |||
727 | static const struct drm_encoder_helper_funcs radeon_legacy_tmds_ext_helper_funcs = { | 723 | static const struct drm_encoder_helper_funcs radeon_legacy_tmds_ext_helper_funcs = { |
728 | .dpms = radeon_legacy_tmds_ext_dpms, | 724 | .dpms = radeon_legacy_tmds_ext_dpms, |
729 | .mode_fixup = radeon_legacy_tmds_ext_mode_fixup, | 725 | .mode_fixup = radeon_legacy_mode_fixup, |
730 | .prepare = radeon_legacy_tmds_ext_prepare, | 726 | .prepare = radeon_legacy_tmds_ext_prepare, |
731 | .mode_set = radeon_legacy_tmds_ext_mode_set, | 727 | .mode_set = radeon_legacy_tmds_ext_mode_set, |
732 | .commit = radeon_legacy_tmds_ext_commit, | 728 | .commit = radeon_legacy_tmds_ext_commit, |
@@ -735,20 +731,9 @@ static const struct drm_encoder_helper_funcs radeon_legacy_tmds_ext_helper_funcs | |||
735 | 731 | ||
736 | 732 | ||
737 | static const struct drm_encoder_funcs radeon_legacy_tmds_ext_enc_funcs = { | 733 | static const struct drm_encoder_funcs radeon_legacy_tmds_ext_enc_funcs = { |
738 | .destroy = radeon_enc_destroy, | 734 | .destroy = radeon_ext_tmds_enc_destroy, |
739 | }; | 735 | }; |
740 | 736 | ||
741 | static bool radeon_legacy_tv_dac_mode_fixup(struct drm_encoder *encoder, | ||
742 | struct drm_display_mode *mode, | ||
743 | struct drm_display_mode *adjusted_mode) | ||
744 | { | ||
745 | /* set the active encoder to connector routing */ | ||
746 | radeon_encoder_set_active_device(encoder); | ||
747 | drm_mode_set_crtcinfo(adjusted_mode, 0); | ||
748 | |||
749 | return true; | ||
750 | } | ||
751 | |||
752 | static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode) | 737 | static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode) |
753 | { | 738 | { |
754 | struct drm_device *dev = encoder->dev; | 739 | struct drm_device *dev = encoder->dev; |
@@ -1265,7 +1250,7 @@ static enum drm_connector_status radeon_legacy_tv_dac_detect(struct drm_encoder | |||
1265 | 1250 | ||
1266 | static const struct drm_encoder_helper_funcs radeon_legacy_tv_dac_helper_funcs = { | 1251 | static const struct drm_encoder_helper_funcs radeon_legacy_tv_dac_helper_funcs = { |
1267 | .dpms = radeon_legacy_tv_dac_dpms, | 1252 | .dpms = radeon_legacy_tv_dac_dpms, |
1268 | .mode_fixup = radeon_legacy_tv_dac_mode_fixup, | 1253 | .mode_fixup = radeon_legacy_mode_fixup, |
1269 | .prepare = radeon_legacy_tv_dac_prepare, | 1254 | .prepare = radeon_legacy_tv_dac_prepare, |
1270 | .mode_set = radeon_legacy_tv_dac_mode_set, | 1255 | .mode_set = radeon_legacy_tv_dac_mode_set, |
1271 | .commit = radeon_legacy_tv_dac_commit, | 1256 | .commit = radeon_legacy_tv_dac_commit, |
@@ -1302,6 +1287,29 @@ static struct radeon_encoder_int_tmds *radeon_legacy_get_tmds_info(struct radeon | |||
1302 | return tmds; | 1287 | return tmds; |
1303 | } | 1288 | } |
1304 | 1289 | ||
1290 | static struct radeon_encoder_ext_tmds *radeon_legacy_get_ext_tmds_info(struct radeon_encoder *encoder) | ||
1291 | { | ||
1292 | struct drm_device *dev = encoder->base.dev; | ||
1293 | struct radeon_device *rdev = dev->dev_private; | ||
1294 | struct radeon_encoder_ext_tmds *tmds = NULL; | ||
1295 | bool ret; | ||
1296 | |||
1297 | if (rdev->is_atom_bios) | ||
1298 | return NULL; | ||
1299 | |||
1300 | tmds = kzalloc(sizeof(struct radeon_encoder_ext_tmds), GFP_KERNEL); | ||
1301 | |||
1302 | if (!tmds) | ||
1303 | return NULL; | ||
1304 | |||
1305 | ret = radeon_legacy_get_ext_tmds_info_from_combios(encoder, tmds); | ||
1306 | |||
1307 | if (ret == false) | ||
1308 | radeon_legacy_get_ext_tmds_info_from_table(encoder, tmds); | ||
1309 | |||
1310 | return tmds; | ||
1311 | } | ||
1312 | |||
1305 | void | 1313 | void |
1306 | radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device) | 1314 | radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device) |
1307 | { | 1315 | { |
@@ -1329,7 +1337,6 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t | |||
1329 | encoder->possible_crtcs = 0x1; | 1337 | encoder->possible_crtcs = 0x1; |
1330 | else | 1338 | else |
1331 | encoder->possible_crtcs = 0x3; | 1339 | encoder->possible_crtcs = 0x3; |
1332 | encoder->possible_clones = 0; | ||
1333 | 1340 | ||
1334 | radeon_encoder->enc_priv = NULL; | 1341 | radeon_encoder->enc_priv = NULL; |
1335 | 1342 | ||
@@ -1373,7 +1380,7 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t | |||
1373 | drm_encoder_init(dev, encoder, &radeon_legacy_tmds_ext_enc_funcs, DRM_MODE_ENCODER_TMDS); | 1380 | drm_encoder_init(dev, encoder, &radeon_legacy_tmds_ext_enc_funcs, DRM_MODE_ENCODER_TMDS); |
1374 | drm_encoder_helper_add(encoder, &radeon_legacy_tmds_ext_helper_funcs); | 1381 | drm_encoder_helper_add(encoder, &radeon_legacy_tmds_ext_helper_funcs); |
1375 | if (!rdev->is_atom_bios) | 1382 | if (!rdev->is_atom_bios) |
1376 | radeon_combios_get_ext_tmds_info(radeon_encoder); | 1383 | radeon_encoder->enc_priv = radeon_legacy_get_ext_tmds_info(radeon_encoder); |
1377 | break; | 1384 | break; |
1378 | } | 1385 | } |
1379 | } | 1386 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index ace726aa0d76..15ec7ca18a95 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h | |||
@@ -33,6 +33,7 @@ | |||
33 | #include <drm_crtc.h> | 33 | #include <drm_crtc.h> |
34 | #include <drm_mode.h> | 34 | #include <drm_mode.h> |
35 | #include <drm_edid.h> | 35 | #include <drm_edid.h> |
36 | #include <drm_dp_helper.h> | ||
36 | #include <linux/i2c.h> | 37 | #include <linux/i2c.h> |
37 | #include <linux/i2c-id.h> | 38 | #include <linux/i2c-id.h> |
38 | #include <linux/i2c-algo-bit.h> | 39 | #include <linux/i2c-algo-bit.h> |
@@ -89,24 +90,45 @@ enum radeon_tv_std { | |||
89 | TV_STD_PAL_CN, | 90 | TV_STD_PAL_CN, |
90 | }; | 91 | }; |
91 | 92 | ||
93 | /* radeon gpio-based i2c | ||
94 | * 1. "mask" reg and bits | ||
95 | * grabs the gpio pins for software use | ||
96 | * 0=not held 1=held | ||
97 | * 2. "a" reg and bits | ||
98 | * output pin value | ||
99 | * 0=low 1=high | ||
100 | * 3. "en" reg and bits | ||
101 | * sets the pin direction | ||
102 | * 0=input 1=output | ||
103 | * 4. "y" reg and bits | ||
104 | * input pin value | ||
105 | * 0=low 1=high | ||
106 | */ | ||
92 | struct radeon_i2c_bus_rec { | 107 | struct radeon_i2c_bus_rec { |
93 | bool valid; | 108 | bool valid; |
109 | /* id used by atom */ | ||
110 | uint8_t i2c_id; | ||
111 | /* can be used with hw i2c engine */ | ||
112 | bool hw_capable; | ||
113 | /* uses multi-media i2c engine */ | ||
114 | bool mm_i2c; | ||
115 | /* regs and bits */ | ||
94 | uint32_t mask_clk_reg; | 116 | uint32_t mask_clk_reg; |
95 | uint32_t mask_data_reg; | 117 | uint32_t mask_data_reg; |
96 | uint32_t a_clk_reg; | 118 | uint32_t a_clk_reg; |
97 | uint32_t a_data_reg; | 119 | uint32_t a_data_reg; |
98 | uint32_t put_clk_reg; | 120 | uint32_t en_clk_reg; |
99 | uint32_t put_data_reg; | 121 | uint32_t en_data_reg; |
100 | uint32_t get_clk_reg; | 122 | uint32_t y_clk_reg; |
101 | uint32_t get_data_reg; | 123 | uint32_t y_data_reg; |
102 | uint32_t mask_clk_mask; | 124 | uint32_t mask_clk_mask; |
103 | uint32_t mask_data_mask; | 125 | uint32_t mask_data_mask; |
104 | uint32_t put_clk_mask; | ||
105 | uint32_t put_data_mask; | ||
106 | uint32_t get_clk_mask; | ||
107 | uint32_t get_data_mask; | ||
108 | uint32_t a_clk_mask; | 126 | uint32_t a_clk_mask; |
109 | uint32_t a_data_mask; | 127 | uint32_t a_data_mask; |
128 | uint32_t en_clk_mask; | ||
129 | uint32_t en_data_mask; | ||
130 | uint32_t y_clk_mask; | ||
131 | uint32_t y_data_mask; | ||
110 | }; | 132 | }; |
111 | 133 | ||
112 | struct radeon_tmds_pll { | 134 | struct radeon_tmds_pll { |
@@ -150,9 +172,12 @@ struct radeon_pll { | |||
150 | }; | 172 | }; |
151 | 173 | ||
152 | struct radeon_i2c_chan { | 174 | struct radeon_i2c_chan { |
153 | struct drm_device *dev; | ||
154 | struct i2c_adapter adapter; | 175 | struct i2c_adapter adapter; |
155 | struct i2c_algo_bit_data algo; | 176 | struct drm_device *dev; |
177 | union { | ||
178 | struct i2c_algo_dp_aux_data dp; | ||
179 | struct i2c_algo_bit_data bit; | ||
180 | } algo; | ||
156 | struct radeon_i2c_bus_rec rec; | 181 | struct radeon_i2c_bus_rec rec; |
157 | }; | 182 | }; |
158 | 183 | ||
@@ -170,6 +195,11 @@ enum radeon_connector_table { | |||
170 | CT_EMAC, | 195 | CT_EMAC, |
171 | }; | 196 | }; |
172 | 197 | ||
198 | enum radeon_dvo_chip { | ||
199 | DVO_SIL164, | ||
200 | DVO_SIL1178, | ||
201 | }; | ||
202 | |||
173 | struct radeon_mode_info { | 203 | struct radeon_mode_info { |
174 | struct atom_context *atom_context; | 204 | struct atom_context *atom_context; |
175 | struct card_info *atom_card_info; | 205 | struct card_info *atom_card_info; |
@@ -261,6 +291,13 @@ struct radeon_encoder_int_tmds { | |||
261 | struct radeon_tmds_pll tmds_pll[4]; | 291 | struct radeon_tmds_pll tmds_pll[4]; |
262 | }; | 292 | }; |
263 | 293 | ||
294 | struct radeon_encoder_ext_tmds { | ||
295 | /* tmds over dvo */ | ||
296 | struct radeon_i2c_chan *i2c_bus; | ||
297 | uint8_t slave_addr; | ||
298 | enum radeon_dvo_chip dvo_chip; | ||
299 | }; | ||
300 | |||
264 | /* spread spectrum */ | 301 | /* spread spectrum */ |
265 | struct radeon_atom_ss { | 302 | struct radeon_atom_ss { |
266 | uint16_t percentage; | 303 | uint16_t percentage; |
@@ -302,6 +339,35 @@ struct radeon_encoder { | |||
302 | struct radeon_connector_atom_dig { | 339 | struct radeon_connector_atom_dig { |
303 | uint32_t igp_lane_info; | 340 | uint32_t igp_lane_info; |
304 | bool linkb; | 341 | bool linkb; |
342 | /* displayport */ | ||
343 | struct radeon_i2c_chan *dp_i2c_bus; | ||
344 | u8 dpcd[8]; | ||
345 | u8 dp_sink_type; | ||
346 | int dp_clock; | ||
347 | int dp_lane_count; | ||
348 | }; | ||
349 | |||
350 | struct radeon_gpio_rec { | ||
351 | bool valid; | ||
352 | u8 id; | ||
353 | u32 reg; | ||
354 | u32 mask; | ||
355 | }; | ||
356 | |||
357 | enum radeon_hpd_id { | ||
358 | RADEON_HPD_NONE = 0, | ||
359 | RADEON_HPD_1, | ||
360 | RADEON_HPD_2, | ||
361 | RADEON_HPD_3, | ||
362 | RADEON_HPD_4, | ||
363 | RADEON_HPD_5, | ||
364 | RADEON_HPD_6, | ||
365 | }; | ||
366 | |||
367 | struct radeon_hpd { | ||
368 | enum radeon_hpd_id hpd; | ||
369 | u8 plugged_state; | ||
370 | struct radeon_gpio_rec gpio; | ||
305 | }; | 371 | }; |
306 | 372 | ||
307 | struct radeon_connector { | 373 | struct radeon_connector { |
@@ -318,6 +384,7 @@ struct radeon_connector { | |||
318 | void *con_priv; | 384 | void *con_priv; |
319 | bool dac_load_detect; | 385 | bool dac_load_detect; |
320 | uint16_t connector_object_id; | 386 | uint16_t connector_object_id; |
387 | struct radeon_hpd hpd; | ||
321 | }; | 388 | }; |
322 | 389 | ||
323 | struct radeon_framebuffer { | 390 | struct radeon_framebuffer { |
@@ -325,10 +392,37 @@ struct radeon_framebuffer { | |||
325 | struct drm_gem_object *obj; | 392 | struct drm_gem_object *obj; |
326 | }; | 393 | }; |
327 | 394 | ||
395 | extern void radeon_connector_hotplug(struct drm_connector *connector); | ||
396 | extern bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector); | ||
397 | extern int radeon_dp_mode_valid_helper(struct radeon_connector *radeon_connector, | ||
398 | struct drm_display_mode *mode); | ||
399 | extern void radeon_dp_set_link_config(struct drm_connector *connector, | ||
400 | struct drm_display_mode *mode); | ||
401 | extern void dp_link_train(struct drm_encoder *encoder, | ||
402 | struct drm_connector *connector); | ||
403 | extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector); | ||
404 | extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector); | ||
405 | extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder, | ||
406 | int action, uint8_t lane_num, | ||
407 | uint8_t lane_set); | ||
408 | extern int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, | ||
409 | uint8_t write_byte, uint8_t *read_byte); | ||
410 | |||
411 | extern struct radeon_i2c_chan *radeon_i2c_create_dp(struct drm_device *dev, | ||
412 | struct radeon_i2c_bus_rec *rec, | ||
413 | const char *name); | ||
328 | extern struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, | 414 | extern struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, |
329 | struct radeon_i2c_bus_rec *rec, | 415 | struct radeon_i2c_bus_rec *rec, |
330 | const char *name); | 416 | const char *name); |
331 | extern void radeon_i2c_destroy(struct radeon_i2c_chan *i2c); | 417 | extern void radeon_i2c_destroy(struct radeon_i2c_chan *i2c); |
418 | extern void radeon_i2c_sw_get_byte(struct radeon_i2c_chan *i2c_bus, | ||
419 | u8 slave_addr, | ||
420 | u8 addr, | ||
421 | u8 *val); | ||
422 | extern void radeon_i2c_sw_put_byte(struct radeon_i2c_chan *i2c, | ||
423 | u8 slave_addr, | ||
424 | u8 addr, | ||
425 | u8 val); | ||
332 | extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector); | 426 | extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector); |
333 | extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector); | 427 | extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector); |
334 | 428 | ||
@@ -343,12 +437,15 @@ extern void radeon_compute_pll(struct radeon_pll *pll, | |||
343 | uint32_t *post_div_p, | 437 | uint32_t *post_div_p, |
344 | int flags); | 438 | int flags); |
345 | 439 | ||
440 | extern void radeon_setup_encoder_clones(struct drm_device *dev); | ||
441 | |||
346 | struct drm_encoder *radeon_encoder_legacy_lvds_add(struct drm_device *dev, int bios_index); | 442 | struct drm_encoder *radeon_encoder_legacy_lvds_add(struct drm_device *dev, int bios_index); |
347 | struct drm_encoder *radeon_encoder_legacy_primary_dac_add(struct drm_device *dev, int bios_index, int with_tv); | 443 | struct drm_encoder *radeon_encoder_legacy_primary_dac_add(struct drm_device *dev, int bios_index, int with_tv); |
348 | struct drm_encoder *radeon_encoder_legacy_tv_dac_add(struct drm_device *dev, int bios_index, int with_tv); | 444 | struct drm_encoder *radeon_encoder_legacy_tv_dac_add(struct drm_device *dev, int bios_index, int with_tv); |
349 | struct drm_encoder *radeon_encoder_legacy_tmds_int_add(struct drm_device *dev, int bios_index); | 445 | struct drm_encoder *radeon_encoder_legacy_tmds_int_add(struct drm_device *dev, int bios_index); |
350 | struct drm_encoder *radeon_encoder_legacy_tmds_ext_add(struct drm_device *dev, int bios_index); | 446 | struct drm_encoder *radeon_encoder_legacy_tmds_ext_add(struct drm_device *dev, int bios_index); |
351 | extern void atombios_external_tmds_setup(struct drm_encoder *encoder, int action); | 447 | extern void atombios_external_tmds_setup(struct drm_encoder *encoder, int action); |
448 | extern void atombios_digital_setup(struct drm_encoder *encoder, int action); | ||
352 | extern int atombios_get_encoder_mode(struct drm_encoder *encoder); | 449 | extern int atombios_get_encoder_mode(struct drm_encoder *encoder); |
353 | extern void radeon_encoder_set_active_device(struct drm_encoder *encoder); | 450 | extern void radeon_encoder_set_active_device(struct drm_encoder *encoder); |
354 | 451 | ||
@@ -378,12 +475,16 @@ extern bool radeon_atom_get_clock_info(struct drm_device *dev); | |||
378 | extern bool radeon_combios_get_clock_info(struct drm_device *dev); | 475 | extern bool radeon_combios_get_clock_info(struct drm_device *dev); |
379 | extern struct radeon_encoder_atom_dig * | 476 | extern struct radeon_encoder_atom_dig * |
380 | radeon_atombios_get_lvds_info(struct radeon_encoder *encoder); | 477 | radeon_atombios_get_lvds_info(struct radeon_encoder *encoder); |
381 | bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder, | 478 | extern bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder, |
382 | struct radeon_encoder_int_tmds *tmds); | 479 | struct radeon_encoder_int_tmds *tmds); |
383 | bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder, | 480 | extern bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder, |
384 | struct radeon_encoder_int_tmds *tmds); | 481 | struct radeon_encoder_int_tmds *tmds); |
385 | bool radeon_legacy_get_tmds_info_from_table(struct radeon_encoder *encoder, | 482 | extern bool radeon_legacy_get_tmds_info_from_table(struct radeon_encoder *encoder, |
386 | struct radeon_encoder_int_tmds *tmds); | 483 | struct radeon_encoder_int_tmds *tmds); |
484 | extern bool radeon_legacy_get_ext_tmds_info_from_combios(struct radeon_encoder *encoder, | ||
485 | struct radeon_encoder_ext_tmds *tmds); | ||
486 | extern bool radeon_legacy_get_ext_tmds_info_from_table(struct radeon_encoder *encoder, | ||
487 | struct radeon_encoder_ext_tmds *tmds); | ||
387 | extern struct radeon_encoder_primary_dac * | 488 | extern struct radeon_encoder_primary_dac * |
388 | radeon_atombios_get_primary_dac_info(struct radeon_encoder *encoder); | 489 | radeon_atombios_get_primary_dac_info(struct radeon_encoder *encoder); |
389 | extern struct radeon_encoder_tv_dac * | 490 | extern struct radeon_encoder_tv_dac * |
@@ -395,6 +496,8 @@ extern struct radeon_encoder_tv_dac * | |||
395 | radeon_combios_get_tv_dac_info(struct radeon_encoder *encoder); | 496 | radeon_combios_get_tv_dac_info(struct radeon_encoder *encoder); |
396 | extern struct radeon_encoder_primary_dac * | 497 | extern struct radeon_encoder_primary_dac * |
397 | radeon_combios_get_primary_dac_info(struct radeon_encoder *encoder); | 498 | radeon_combios_get_primary_dac_info(struct radeon_encoder *encoder); |
499 | extern bool radeon_combios_external_tmds_setup(struct drm_encoder *encoder); | ||
500 | extern void radeon_external_tmds_setup(struct drm_encoder *encoder); | ||
398 | extern void radeon_combios_output_lock(struct drm_encoder *encoder, bool lock); | 501 | extern void radeon_combios_output_lock(struct drm_encoder *encoder, bool lock); |
399 | extern void radeon_combios_initialize_bios_scratch_regs(struct drm_device *dev); | 502 | extern void radeon_combios_initialize_bios_scratch_regs(struct drm_device *dev); |
400 | extern void radeon_atom_output_lock(struct drm_encoder *encoder, bool lock); | 503 | extern void radeon_atom_output_lock(struct drm_encoder *encoder, bool lock); |
@@ -426,16 +529,13 @@ void radeon_atombios_init_crtc(struct drm_device *dev, | |||
426 | struct radeon_crtc *radeon_crtc); | 529 | struct radeon_crtc *radeon_crtc); |
427 | void radeon_legacy_init_crtc(struct drm_device *dev, | 530 | void radeon_legacy_init_crtc(struct drm_device *dev, |
428 | struct radeon_crtc *radeon_crtc); | 531 | struct radeon_crtc *radeon_crtc); |
429 | void radeon_i2c_do_lock(struct radeon_connector *radeon_connector, int lock_state); | 532 | extern void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state); |
430 | 533 | ||
431 | void radeon_get_clock_info(struct drm_device *dev); | 534 | void radeon_get_clock_info(struct drm_device *dev); |
432 | 535 | ||
433 | extern bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev); | 536 | extern bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev); |
434 | extern bool radeon_get_atom_connector_info_from_supported_devices_table(struct drm_device *dev); | 537 | extern bool radeon_get_atom_connector_info_from_supported_devices_table(struct drm_device *dev); |
435 | 538 | ||
436 | void radeon_rmx_mode_fixup(struct drm_encoder *encoder, | ||
437 | struct drm_display_mode *mode, | ||
438 | struct drm_display_mode *adjusted_mode); | ||
439 | void radeon_enc_destroy(struct drm_encoder *encoder); | 539 | void radeon_enc_destroy(struct drm_encoder *encoder); |
440 | void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj); | 540 | void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj); |
441 | void radeon_combios_asic_init(struct drm_device *dev); | 541 | void radeon_combios_asic_init(struct drm_device *dev); |
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 1f056dadc5c2..bec494384825 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c | |||
@@ -34,74 +34,32 @@ | |||
34 | #include "radeon_drm.h" | 34 | #include "radeon_drm.h" |
35 | #include "radeon.h" | 35 | #include "radeon.h" |
36 | 36 | ||
37 | struct radeon_object { | ||
38 | struct ttm_buffer_object tobj; | ||
39 | struct list_head list; | ||
40 | struct radeon_device *rdev; | ||
41 | struct drm_gem_object *gobj; | ||
42 | struct ttm_bo_kmap_obj kmap; | ||
43 | unsigned pin_count; | ||
44 | uint64_t gpu_addr; | ||
45 | void *kptr; | ||
46 | bool is_iomem; | ||
47 | uint32_t tiling_flags; | ||
48 | uint32_t pitch; | ||
49 | int surface_reg; | ||
50 | }; | ||
51 | 37 | ||
52 | int radeon_ttm_init(struct radeon_device *rdev); | 38 | int radeon_ttm_init(struct radeon_device *rdev); |
53 | void radeon_ttm_fini(struct radeon_device *rdev); | 39 | void radeon_ttm_fini(struct radeon_device *rdev); |
40 | static void radeon_bo_clear_surface_reg(struct radeon_bo *bo); | ||
54 | 41 | ||
55 | /* | 42 | /* |
56 | * To exclude mutual BO access we rely on bo_reserve exclusion, as all | 43 | * To exclude mutual BO access we rely on bo_reserve exclusion, as all |
57 | * function are calling it. | 44 | * function are calling it. |
58 | */ | 45 | */ |
59 | 46 | ||
60 | static int radeon_object_reserve(struct radeon_object *robj, bool interruptible) | 47 | static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo) |
61 | { | 48 | { |
62 | return ttm_bo_reserve(&robj->tobj, interruptible, false, false, 0); | 49 | struct radeon_bo *bo; |
63 | } | ||
64 | 50 | ||
65 | static void radeon_object_unreserve(struct radeon_object *robj) | 51 | bo = container_of(tbo, struct radeon_bo, tbo); |
66 | { | 52 | mutex_lock(&bo->rdev->gem.mutex); |
67 | ttm_bo_unreserve(&robj->tobj); | 53 | list_del_init(&bo->list); |
54 | mutex_unlock(&bo->rdev->gem.mutex); | ||
55 | radeon_bo_clear_surface_reg(bo); | ||
56 | kfree(bo); | ||
68 | } | 57 | } |
69 | 58 | ||
70 | static void radeon_ttm_object_object_destroy(struct ttm_buffer_object *tobj) | 59 | static inline u32 radeon_ttm_flags_from_domain(u32 domain) |
71 | { | 60 | { |
72 | struct radeon_object *robj; | 61 | u32 flags = 0; |
73 | |||
74 | robj = container_of(tobj, struct radeon_object, tobj); | ||
75 | list_del_init(&robj->list); | ||
76 | radeon_object_clear_surface_reg(robj); | ||
77 | kfree(robj); | ||
78 | } | ||
79 | |||
80 | static inline void radeon_object_gpu_addr(struct radeon_object *robj) | ||
81 | { | ||
82 | /* Default gpu address */ | ||
83 | robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL; | ||
84 | if (robj->tobj.mem.mm_node == NULL) { | ||
85 | return; | ||
86 | } | ||
87 | robj->gpu_addr = ((u64)robj->tobj.mem.mm_node->start) << PAGE_SHIFT; | ||
88 | switch (robj->tobj.mem.mem_type) { | ||
89 | case TTM_PL_VRAM: | ||
90 | robj->gpu_addr += (u64)robj->rdev->mc.vram_location; | ||
91 | break; | ||
92 | case TTM_PL_TT: | ||
93 | robj->gpu_addr += (u64)robj->rdev->mc.gtt_location; | ||
94 | break; | ||
95 | default: | ||
96 | DRM_ERROR("Unknown placement %d\n", robj->tobj.mem.mem_type); | ||
97 | robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL; | ||
98 | return; | ||
99 | } | ||
100 | } | ||
101 | 62 | ||
102 | static inline uint32_t radeon_object_flags_from_domain(uint32_t domain) | ||
103 | { | ||
104 | uint32_t flags = 0; | ||
105 | if (domain & RADEON_GEM_DOMAIN_VRAM) { | 63 | if (domain & RADEON_GEM_DOMAIN_VRAM) { |
106 | flags |= TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED; | 64 | flags |= TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED; |
107 | } | 65 | } |
@@ -117,17 +75,13 @@ static inline uint32_t radeon_object_flags_from_domain(uint32_t domain) | |||
117 | return flags; | 75 | return flags; |
118 | } | 76 | } |
119 | 77 | ||
120 | int radeon_object_create(struct radeon_device *rdev, | 78 | int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj, |
121 | struct drm_gem_object *gobj, | 79 | unsigned long size, bool kernel, u32 domain, |
122 | unsigned long size, | 80 | struct radeon_bo **bo_ptr) |
123 | bool kernel, | ||
124 | uint32_t domain, | ||
125 | bool interruptible, | ||
126 | struct radeon_object **robj_ptr) | ||
127 | { | 81 | { |
128 | struct radeon_object *robj; | 82 | struct radeon_bo *bo; |
129 | enum ttm_bo_type type; | 83 | enum ttm_bo_type type; |
130 | uint32_t flags; | 84 | u32 flags; |
131 | int r; | 85 | int r; |
132 | 86 | ||
133 | if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) { | 87 | if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) { |
@@ -138,206 +92,140 @@ int radeon_object_create(struct radeon_device *rdev, | |||
138 | } else { | 92 | } else { |
139 | type = ttm_bo_type_device; | 93 | type = ttm_bo_type_device; |
140 | } | 94 | } |
141 | *robj_ptr = NULL; | 95 | *bo_ptr = NULL; |
142 | robj = kzalloc(sizeof(struct radeon_object), GFP_KERNEL); | 96 | bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL); |
143 | if (robj == NULL) { | 97 | if (bo == NULL) |
144 | return -ENOMEM; | 98 | return -ENOMEM; |
145 | } | 99 | bo->rdev = rdev; |
146 | robj->rdev = rdev; | 100 | bo->gobj = gobj; |
147 | robj->gobj = gobj; | 101 | bo->surface_reg = -1; |
148 | robj->surface_reg = -1; | 102 | INIT_LIST_HEAD(&bo->list); |
149 | INIT_LIST_HEAD(&robj->list); | 103 | |
150 | 104 | flags = radeon_ttm_flags_from_domain(domain); | |
151 | flags = radeon_object_flags_from_domain(domain); | 105 | retry: |
152 | r = ttm_buffer_object_init(&rdev->mman.bdev, &robj->tobj, size, type, flags, | 106 | r = ttm_buffer_object_init(&rdev->mman.bdev, &bo->tbo, size, type, |
153 | 0, 0, false, NULL, size, | 107 | flags, 0, 0, true, NULL, size, |
154 | &radeon_ttm_object_object_destroy); | 108 | &radeon_ttm_bo_destroy); |
155 | if (unlikely(r != 0)) { | 109 | if (unlikely(r != 0)) { |
110 | if (r == -ERESTART) | ||
111 | goto retry; | ||
156 | /* ttm call radeon_ttm_object_object_destroy if error happen */ | 112 | /* ttm call radeon_ttm_object_object_destroy if error happen */ |
157 | DRM_ERROR("Failed to allocate TTM object (%ld, 0x%08X, %u)\n", | 113 | dev_err(rdev->dev, "object_init failed for (%ld, 0x%08X)\n", |
158 | size, flags, 0); | 114 | size, flags); |
159 | return r; | 115 | return r; |
160 | } | 116 | } |
161 | *robj_ptr = robj; | 117 | *bo_ptr = bo; |
162 | if (gobj) { | 118 | if (gobj) { |
163 | list_add_tail(&robj->list, &rdev->gem.objects); | 119 | mutex_lock(&bo->rdev->gem.mutex); |
120 | list_add_tail(&bo->list, &rdev->gem.objects); | ||
121 | mutex_unlock(&bo->rdev->gem.mutex); | ||
164 | } | 122 | } |
165 | return 0; | 123 | return 0; |
166 | } | 124 | } |
167 | 125 | ||
168 | int radeon_object_kmap(struct radeon_object *robj, void **ptr) | 126 | int radeon_bo_kmap(struct radeon_bo *bo, void **ptr) |
169 | { | 127 | { |
128 | bool is_iomem; | ||
170 | int r; | 129 | int r; |
171 | 130 | ||
172 | spin_lock(&robj->tobj.lock); | 131 | if (bo->kptr) { |
173 | if (robj->kptr) { | ||
174 | if (ptr) { | 132 | if (ptr) { |
175 | *ptr = robj->kptr; | 133 | *ptr = bo->kptr; |
176 | } | 134 | } |
177 | spin_unlock(&robj->tobj.lock); | ||
178 | return 0; | 135 | return 0; |
179 | } | 136 | } |
180 | spin_unlock(&robj->tobj.lock); | 137 | r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap); |
181 | r = ttm_bo_kmap(&robj->tobj, 0, robj->tobj.num_pages, &robj->kmap); | ||
182 | if (r) { | 138 | if (r) { |
183 | return r; | 139 | return r; |
184 | } | 140 | } |
185 | spin_lock(&robj->tobj.lock); | 141 | bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem); |
186 | robj->kptr = ttm_kmap_obj_virtual(&robj->kmap, &robj->is_iomem); | ||
187 | spin_unlock(&robj->tobj.lock); | ||
188 | if (ptr) { | 142 | if (ptr) { |
189 | *ptr = robj->kptr; | 143 | *ptr = bo->kptr; |
190 | } | 144 | } |
191 | radeon_object_check_tiling(robj, 0, 0); | 145 | radeon_bo_check_tiling(bo, 0, 0); |
192 | return 0; | 146 | return 0; |
193 | } | 147 | } |
194 | 148 | ||
195 | void radeon_object_kunmap(struct radeon_object *robj) | 149 | void radeon_bo_kunmap(struct radeon_bo *bo) |
196 | { | 150 | { |
197 | spin_lock(&robj->tobj.lock); | 151 | if (bo->kptr == NULL) |
198 | if (robj->kptr == NULL) { | ||
199 | spin_unlock(&robj->tobj.lock); | ||
200 | return; | 152 | return; |
201 | } | 153 | bo->kptr = NULL; |
202 | robj->kptr = NULL; | 154 | radeon_bo_check_tiling(bo, 0, 0); |
203 | spin_unlock(&robj->tobj.lock); | 155 | ttm_bo_kunmap(&bo->kmap); |
204 | radeon_object_check_tiling(robj, 0, 0); | ||
205 | ttm_bo_kunmap(&robj->kmap); | ||
206 | } | 156 | } |
207 | 157 | ||
208 | void radeon_object_unref(struct radeon_object **robj) | 158 | void radeon_bo_unref(struct radeon_bo **bo) |
209 | { | 159 | { |
210 | struct ttm_buffer_object *tobj; | 160 | struct ttm_buffer_object *tbo; |
211 | 161 | ||
212 | if ((*robj) == NULL) { | 162 | if ((*bo) == NULL) |
213 | return; | 163 | return; |
214 | } | 164 | tbo = &((*bo)->tbo); |
215 | tobj = &((*robj)->tobj); | 165 | ttm_bo_unref(&tbo); |
216 | ttm_bo_unref(&tobj); | 166 | if (tbo == NULL) |
217 | if (tobj == NULL) { | 167 | *bo = NULL; |
218 | *robj = NULL; | ||
219 | } | ||
220 | } | ||
221 | |||
222 | int radeon_object_mmap(struct radeon_object *robj, uint64_t *offset) | ||
223 | { | ||
224 | *offset = robj->tobj.addr_space_offset; | ||
225 | return 0; | ||
226 | } | 168 | } |
227 | 169 | ||
228 | int radeon_object_pin(struct radeon_object *robj, uint32_t domain, | 170 | int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr) |
229 | uint64_t *gpu_addr) | ||
230 | { | 171 | { |
231 | uint32_t flags; | 172 | u32 flags; |
232 | uint32_t tmp; | 173 | u32 tmp; |
233 | int r; | 174 | int r; |
234 | 175 | ||
235 | flags = radeon_object_flags_from_domain(domain); | 176 | flags = radeon_ttm_flags_from_domain(domain); |
236 | spin_lock(&robj->tobj.lock); | 177 | if (bo->pin_count) { |
237 | if (robj->pin_count) { | 178 | bo->pin_count++; |
238 | robj->pin_count++; | 179 | if (gpu_addr) |
239 | if (gpu_addr != NULL) { | 180 | *gpu_addr = radeon_bo_gpu_offset(bo); |
240 | *gpu_addr = robj->gpu_addr; | ||
241 | } | ||
242 | spin_unlock(&robj->tobj.lock); | ||
243 | return 0; | 181 | return 0; |
244 | } | 182 | } |
245 | spin_unlock(&robj->tobj.lock); | 183 | tmp = bo->tbo.mem.placement; |
246 | r = radeon_object_reserve(robj, false); | ||
247 | if (unlikely(r != 0)) { | ||
248 | DRM_ERROR("radeon: failed to reserve object for pinning it.\n"); | ||
249 | return r; | ||
250 | } | ||
251 | tmp = robj->tobj.mem.placement; | ||
252 | ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM); | 184 | ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM); |
253 | robj->tobj.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT | TTM_PL_MASK_CACHING; | 185 | bo->tbo.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT | |
254 | r = ttm_buffer_object_validate(&robj->tobj, | 186 | TTM_PL_MASK_CACHING; |
255 | robj->tobj.proposed_placement, | 187 | retry: |
256 | false, false); | 188 | r = ttm_buffer_object_validate(&bo->tbo, bo->tbo.proposed_placement, |
257 | radeon_object_gpu_addr(robj); | 189 | true, false); |
258 | if (gpu_addr != NULL) { | 190 | if (likely(r == 0)) { |
259 | *gpu_addr = robj->gpu_addr; | 191 | bo->pin_count = 1; |
192 | if (gpu_addr != NULL) | ||
193 | *gpu_addr = radeon_bo_gpu_offset(bo); | ||
260 | } | 194 | } |
261 | robj->pin_count = 1; | ||
262 | if (unlikely(r != 0)) { | 195 | if (unlikely(r != 0)) { |
263 | DRM_ERROR("radeon: failed to pin object.\n"); | 196 | if (r == -ERESTART) |
197 | goto retry; | ||
198 | dev_err(bo->rdev->dev, "%p pin failed\n", bo); | ||
264 | } | 199 | } |
265 | radeon_object_unreserve(robj); | ||
266 | return r; | 200 | return r; |
267 | } | 201 | } |
268 | 202 | ||
269 | void radeon_object_unpin(struct radeon_object *robj) | 203 | int radeon_bo_unpin(struct radeon_bo *bo) |
270 | { | 204 | { |
271 | uint32_t flags; | ||
272 | int r; | 205 | int r; |
273 | 206 | ||
274 | spin_lock(&robj->tobj.lock); | 207 | if (!bo->pin_count) { |
275 | if (!robj->pin_count) { | 208 | dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo); |
276 | spin_unlock(&robj->tobj.lock); | 209 | return 0; |
277 | printk(KERN_WARNING "Unpin not necessary for %p !\n", robj); | ||
278 | return; | ||
279 | } | ||
280 | robj->pin_count--; | ||
281 | if (robj->pin_count) { | ||
282 | spin_unlock(&robj->tobj.lock); | ||
283 | return; | ||
284 | } | ||
285 | spin_unlock(&robj->tobj.lock); | ||
286 | r = radeon_object_reserve(robj, false); | ||
287 | if (unlikely(r != 0)) { | ||
288 | DRM_ERROR("radeon: failed to reserve object for unpinning it.\n"); | ||
289 | return; | ||
290 | } | ||
291 | flags = robj->tobj.mem.placement; | ||
292 | robj->tobj.proposed_placement = flags & ~TTM_PL_FLAG_NO_EVICT; | ||
293 | r = ttm_buffer_object_validate(&robj->tobj, | ||
294 | robj->tobj.proposed_placement, | ||
295 | false, false); | ||
296 | if (unlikely(r != 0)) { | ||
297 | DRM_ERROR("radeon: failed to unpin buffer.\n"); | ||
298 | } | ||
299 | radeon_object_unreserve(robj); | ||
300 | } | ||
301 | |||
302 | int radeon_object_wait(struct radeon_object *robj) | ||
303 | { | ||
304 | int r = 0; | ||
305 | |||
306 | /* FIXME: should use block reservation instead */ | ||
307 | r = radeon_object_reserve(robj, true); | ||
308 | if (unlikely(r != 0)) { | ||
309 | DRM_ERROR("radeon: failed to reserve object for waiting.\n"); | ||
310 | return r; | ||
311 | } | ||
312 | spin_lock(&robj->tobj.lock); | ||
313 | if (robj->tobj.sync_obj) { | ||
314 | r = ttm_bo_wait(&robj->tobj, true, true, false); | ||
315 | } | 210 | } |
316 | spin_unlock(&robj->tobj.lock); | 211 | bo->pin_count--; |
317 | radeon_object_unreserve(robj); | 212 | if (bo->pin_count) |
318 | return r; | 213 | return 0; |
319 | } | 214 | bo->tbo.proposed_placement = bo->tbo.mem.placement & |
320 | 215 | ~TTM_PL_FLAG_NO_EVICT; | |
321 | int radeon_object_busy_domain(struct radeon_object *robj, uint32_t *cur_placement) | 216 | retry: |
322 | { | 217 | r = ttm_buffer_object_validate(&bo->tbo, bo->tbo.proposed_placement, |
323 | int r = 0; | 218 | true, false); |
324 | |||
325 | r = radeon_object_reserve(robj, true); | ||
326 | if (unlikely(r != 0)) { | 219 | if (unlikely(r != 0)) { |
327 | DRM_ERROR("radeon: failed to reserve object for waiting.\n"); | 220 | if (r == -ERESTART) |
221 | goto retry; | ||
222 | dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo); | ||
328 | return r; | 223 | return r; |
329 | } | 224 | } |
330 | spin_lock(&robj->tobj.lock); | 225 | return 0; |
331 | *cur_placement = robj->tobj.mem.mem_type; | ||
332 | if (robj->tobj.sync_obj) { | ||
333 | r = ttm_bo_wait(&robj->tobj, true, true, true); | ||
334 | } | ||
335 | spin_unlock(&robj->tobj.lock); | ||
336 | radeon_object_unreserve(robj); | ||
337 | return r; | ||
338 | } | 226 | } |
339 | 227 | ||
340 | int radeon_object_evict_vram(struct radeon_device *rdev) | 228 | int radeon_bo_evict_vram(struct radeon_device *rdev) |
341 | { | 229 | { |
342 | if (rdev->flags & RADEON_IS_IGP) { | 230 | if (rdev->flags & RADEON_IS_IGP) { |
343 | /* Useless to evict on IGP chips */ | 231 | /* Useless to evict on IGP chips */ |
@@ -346,30 +234,32 @@ int radeon_object_evict_vram(struct radeon_device *rdev) | |||
346 | return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM); | 234 | return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM); |
347 | } | 235 | } |
348 | 236 | ||
349 | void radeon_object_force_delete(struct radeon_device *rdev) | 237 | void radeon_bo_force_delete(struct radeon_device *rdev) |
350 | { | 238 | { |
351 | struct radeon_object *robj, *n; | 239 | struct radeon_bo *bo, *n; |
352 | struct drm_gem_object *gobj; | 240 | struct drm_gem_object *gobj; |
353 | 241 | ||
354 | if (list_empty(&rdev->gem.objects)) { | 242 | if (list_empty(&rdev->gem.objects)) { |
355 | return; | 243 | return; |
356 | } | 244 | } |
357 | DRM_ERROR("Userspace still has active objects !\n"); | 245 | dev_err(rdev->dev, "Userspace still has active objects !\n"); |
358 | list_for_each_entry_safe(robj, n, &rdev->gem.objects, list) { | 246 | list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) { |
359 | mutex_lock(&rdev->ddev->struct_mutex); | 247 | mutex_lock(&rdev->ddev->struct_mutex); |
360 | gobj = robj->gobj; | 248 | gobj = bo->gobj; |
361 | DRM_ERROR("Force free for (%p,%p,%lu,%lu)\n", | 249 | dev_err(rdev->dev, "%p %p %lu %lu force free\n", |
362 | gobj, robj, (unsigned long)gobj->size, | 250 | gobj, bo, (unsigned long)gobj->size, |
363 | *((unsigned long *)&gobj->refcount)); | 251 | *((unsigned long *)&gobj->refcount)); |
364 | list_del_init(&robj->list); | 252 | mutex_lock(&bo->rdev->gem.mutex); |
365 | radeon_object_unref(&robj); | 253 | list_del_init(&bo->list); |
254 | mutex_unlock(&bo->rdev->gem.mutex); | ||
255 | radeon_bo_unref(&bo); | ||
366 | gobj->driver_private = NULL; | 256 | gobj->driver_private = NULL; |
367 | drm_gem_object_unreference(gobj); | 257 | drm_gem_object_unreference(gobj); |
368 | mutex_unlock(&rdev->ddev->struct_mutex); | 258 | mutex_unlock(&rdev->ddev->struct_mutex); |
369 | } | 259 | } |
370 | } | 260 | } |
371 | 261 | ||
372 | int radeon_object_init(struct radeon_device *rdev) | 262 | int radeon_bo_init(struct radeon_device *rdev) |
373 | { | 263 | { |
374 | /* Add an MTRR for the VRAM */ | 264 | /* Add an MTRR for the VRAM */ |
375 | rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size, | 265 | rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size, |
@@ -382,13 +272,13 @@ int radeon_object_init(struct radeon_device *rdev) | |||
382 | return radeon_ttm_init(rdev); | 272 | return radeon_ttm_init(rdev); |
383 | } | 273 | } |
384 | 274 | ||
385 | void radeon_object_fini(struct radeon_device *rdev) | 275 | void radeon_bo_fini(struct radeon_device *rdev) |
386 | { | 276 | { |
387 | radeon_ttm_fini(rdev); | 277 | radeon_ttm_fini(rdev); |
388 | } | 278 | } |
389 | 279 | ||
390 | void radeon_object_list_add_object(struct radeon_object_list *lobj, | 280 | void radeon_bo_list_add_object(struct radeon_bo_list *lobj, |
391 | struct list_head *head) | 281 | struct list_head *head) |
392 | { | 282 | { |
393 | if (lobj->wdomain) { | 283 | if (lobj->wdomain) { |
394 | list_add(&lobj->list, head); | 284 | list_add(&lobj->list, head); |
@@ -397,72 +287,67 @@ void radeon_object_list_add_object(struct radeon_object_list *lobj, | |||
397 | } | 287 | } |
398 | } | 288 | } |
399 | 289 | ||
400 | int radeon_object_list_reserve(struct list_head *head) | 290 | int radeon_bo_list_reserve(struct list_head *head) |
401 | { | 291 | { |
402 | struct radeon_object_list *lobj; | 292 | struct radeon_bo_list *lobj; |
403 | int r; | 293 | int r; |
404 | 294 | ||
405 | list_for_each_entry(lobj, head, list){ | 295 | list_for_each_entry(lobj, head, list){ |
406 | if (!lobj->robj->pin_count) { | 296 | r = radeon_bo_reserve(lobj->bo, false); |
407 | r = radeon_object_reserve(lobj->robj, true); | 297 | if (unlikely(r != 0)) |
408 | if (unlikely(r != 0)) { | 298 | return r; |
409 | DRM_ERROR("radeon: failed to reserve object.\n"); | ||
410 | return r; | ||
411 | } | ||
412 | } else { | ||
413 | } | ||
414 | } | 299 | } |
415 | return 0; | 300 | return 0; |
416 | } | 301 | } |
417 | 302 | ||
418 | void radeon_object_list_unreserve(struct list_head *head) | 303 | void radeon_bo_list_unreserve(struct list_head *head) |
419 | { | 304 | { |
420 | struct radeon_object_list *lobj; | 305 | struct radeon_bo_list *lobj; |
421 | 306 | ||
422 | list_for_each_entry(lobj, head, list) { | 307 | list_for_each_entry(lobj, head, list) { |
423 | if (!lobj->robj->pin_count) { | 308 | /* only unreserve object we successfully reserved */ |
424 | radeon_object_unreserve(lobj->robj); | 309 | if (radeon_bo_is_reserved(lobj->bo)) |
425 | } | 310 | radeon_bo_unreserve(lobj->bo); |
426 | } | 311 | } |
427 | } | 312 | } |
428 | 313 | ||
429 | int radeon_object_list_validate(struct list_head *head, void *fence) | 314 | int radeon_bo_list_validate(struct list_head *head, void *fence) |
430 | { | 315 | { |
431 | struct radeon_object_list *lobj; | 316 | struct radeon_bo_list *lobj; |
432 | struct radeon_object *robj; | 317 | struct radeon_bo *bo; |
433 | struct radeon_fence *old_fence = NULL; | 318 | struct radeon_fence *old_fence = NULL; |
434 | int r; | 319 | int r; |
435 | 320 | ||
436 | r = radeon_object_list_reserve(head); | 321 | r = radeon_bo_list_reserve(head); |
437 | if (unlikely(r != 0)) { | 322 | if (unlikely(r != 0)) { |
438 | radeon_object_list_unreserve(head); | ||
439 | return r; | 323 | return r; |
440 | } | 324 | } |
441 | list_for_each_entry(lobj, head, list) { | 325 | list_for_each_entry(lobj, head, list) { |
442 | robj = lobj->robj; | 326 | bo = lobj->bo; |
443 | if (!robj->pin_count) { | 327 | if (!bo->pin_count) { |
444 | if (lobj->wdomain) { | 328 | if (lobj->wdomain) { |
445 | robj->tobj.proposed_placement = | 329 | bo->tbo.proposed_placement = |
446 | radeon_object_flags_from_domain(lobj->wdomain); | 330 | radeon_ttm_flags_from_domain(lobj->wdomain); |
447 | } else { | 331 | } else { |
448 | robj->tobj.proposed_placement = | 332 | bo->tbo.proposed_placement = |
449 | radeon_object_flags_from_domain(lobj->rdomain); | 333 | radeon_ttm_flags_from_domain(lobj->rdomain); |
450 | } | 334 | } |
451 | r = ttm_buffer_object_validate(&robj->tobj, | 335 | retry: |
452 | robj->tobj.proposed_placement, | 336 | r = ttm_buffer_object_validate(&bo->tbo, |
453 | true, false); | 337 | bo->tbo.proposed_placement, |
338 | true, false); | ||
454 | if (unlikely(r)) { | 339 | if (unlikely(r)) { |
455 | DRM_ERROR("radeon: failed to validate.\n"); | 340 | if (r == -ERESTART) |
341 | goto retry; | ||
456 | return r; | 342 | return r; |
457 | } | 343 | } |
458 | radeon_object_gpu_addr(robj); | ||
459 | } | 344 | } |
460 | lobj->gpu_offset = robj->gpu_addr; | 345 | lobj->gpu_offset = radeon_bo_gpu_offset(bo); |
461 | lobj->tiling_flags = robj->tiling_flags; | 346 | lobj->tiling_flags = bo->tiling_flags; |
462 | if (fence) { | 347 | if (fence) { |
463 | old_fence = (struct radeon_fence *)robj->tobj.sync_obj; | 348 | old_fence = (struct radeon_fence *)bo->tbo.sync_obj; |
464 | robj->tobj.sync_obj = radeon_fence_ref(fence); | 349 | bo->tbo.sync_obj = radeon_fence_ref(fence); |
465 | robj->tobj.sync_obj_arg = NULL; | 350 | bo->tbo.sync_obj_arg = NULL; |
466 | } | 351 | } |
467 | if (old_fence) { | 352 | if (old_fence) { |
468 | radeon_fence_unref(&old_fence); | 353 | radeon_fence_unref(&old_fence); |
@@ -471,51 +356,44 @@ int radeon_object_list_validate(struct list_head *head, void *fence) | |||
471 | return 0; | 356 | return 0; |
472 | } | 357 | } |
473 | 358 | ||
474 | void radeon_object_list_unvalidate(struct list_head *head) | 359 | void radeon_bo_list_unvalidate(struct list_head *head, void *fence) |
475 | { | 360 | { |
476 | struct radeon_object_list *lobj; | 361 | struct radeon_bo_list *lobj; |
477 | struct radeon_fence *old_fence = NULL; | 362 | struct radeon_fence *old_fence; |
478 | 363 | ||
479 | list_for_each_entry(lobj, head, list) { | 364 | if (fence) |
480 | old_fence = (struct radeon_fence *)lobj->robj->tobj.sync_obj; | 365 | list_for_each_entry(lobj, head, list) { |
481 | lobj->robj->tobj.sync_obj = NULL; | 366 | old_fence = to_radeon_fence(lobj->bo->tbo.sync_obj); |
482 | if (old_fence) { | 367 | if (old_fence == fence) { |
483 | radeon_fence_unref(&old_fence); | 368 | lobj->bo->tbo.sync_obj = NULL; |
369 | radeon_fence_unref(&old_fence); | ||
370 | } | ||
484 | } | 371 | } |
485 | } | 372 | radeon_bo_list_unreserve(head); |
486 | radeon_object_list_unreserve(head); | ||
487 | } | ||
488 | |||
489 | void radeon_object_list_clean(struct list_head *head) | ||
490 | { | ||
491 | radeon_object_list_unreserve(head); | ||
492 | } | 373 | } |
493 | 374 | ||
494 | int radeon_object_fbdev_mmap(struct radeon_object *robj, | 375 | int radeon_bo_fbdev_mmap(struct radeon_bo *bo, |
495 | struct vm_area_struct *vma) | 376 | struct vm_area_struct *vma) |
496 | { | 377 | { |
497 | return ttm_fbdev_mmap(vma, &robj->tobj); | 378 | return ttm_fbdev_mmap(vma, &bo->tbo); |
498 | } | 379 | } |
499 | 380 | ||
500 | unsigned long radeon_object_size(struct radeon_object *robj) | 381 | static int radeon_bo_get_surface_reg(struct radeon_bo *bo) |
501 | { | 382 | { |
502 | return robj->tobj.num_pages << PAGE_SHIFT; | 383 | struct radeon_device *rdev = bo->rdev; |
503 | } | ||
504 | |||
505 | int radeon_object_get_surface_reg(struct radeon_object *robj) | ||
506 | { | ||
507 | struct radeon_device *rdev = robj->rdev; | ||
508 | struct radeon_surface_reg *reg; | 384 | struct radeon_surface_reg *reg; |
509 | struct radeon_object *old_object; | 385 | struct radeon_bo *old_object; |
510 | int steal; | 386 | int steal; |
511 | int i; | 387 | int i; |
512 | 388 | ||
513 | if (!robj->tiling_flags) | 389 | BUG_ON(!atomic_read(&bo->tbo.reserved)); |
390 | |||
391 | if (!bo->tiling_flags) | ||
514 | return 0; | 392 | return 0; |
515 | 393 | ||
516 | if (robj->surface_reg >= 0) { | 394 | if (bo->surface_reg >= 0) { |
517 | reg = &rdev->surface_regs[robj->surface_reg]; | 395 | reg = &rdev->surface_regs[bo->surface_reg]; |
518 | i = robj->surface_reg; | 396 | i = bo->surface_reg; |
519 | goto out; | 397 | goto out; |
520 | } | 398 | } |
521 | 399 | ||
@@ -523,10 +401,10 @@ int radeon_object_get_surface_reg(struct radeon_object *robj) | |||
523 | for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) { | 401 | for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) { |
524 | 402 | ||
525 | reg = &rdev->surface_regs[i]; | 403 | reg = &rdev->surface_regs[i]; |
526 | if (!reg->robj) | 404 | if (!reg->bo) |
527 | break; | 405 | break; |
528 | 406 | ||
529 | old_object = reg->robj; | 407 | old_object = reg->bo; |
530 | if (old_object->pin_count == 0) | 408 | if (old_object->pin_count == 0) |
531 | steal = i; | 409 | steal = i; |
532 | } | 410 | } |
@@ -537,91 +415,101 @@ int radeon_object_get_surface_reg(struct radeon_object *robj) | |||
537 | return -ENOMEM; | 415 | return -ENOMEM; |
538 | /* find someone with a surface reg and nuke their BO */ | 416 | /* find someone with a surface reg and nuke their BO */ |
539 | reg = &rdev->surface_regs[steal]; | 417 | reg = &rdev->surface_regs[steal]; |
540 | old_object = reg->robj; | 418 | old_object = reg->bo; |
541 | /* blow away the mapping */ | 419 | /* blow away the mapping */ |
542 | DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object); | 420 | DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object); |
543 | ttm_bo_unmap_virtual(&old_object->tobj); | 421 | ttm_bo_unmap_virtual(&old_object->tbo); |
544 | old_object->surface_reg = -1; | 422 | old_object->surface_reg = -1; |
545 | i = steal; | 423 | i = steal; |
546 | } | 424 | } |
547 | 425 | ||
548 | robj->surface_reg = i; | 426 | bo->surface_reg = i; |
549 | reg->robj = robj; | 427 | reg->bo = bo; |
550 | 428 | ||
551 | out: | 429 | out: |
552 | radeon_set_surface_reg(rdev, i, robj->tiling_flags, robj->pitch, | 430 | radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch, |
553 | robj->tobj.mem.mm_node->start << PAGE_SHIFT, | 431 | bo->tbo.mem.mm_node->start << PAGE_SHIFT, |
554 | robj->tobj.num_pages << PAGE_SHIFT); | 432 | bo->tbo.num_pages << PAGE_SHIFT); |
555 | return 0; | 433 | return 0; |
556 | } | 434 | } |
557 | 435 | ||
558 | void radeon_object_clear_surface_reg(struct radeon_object *robj) | 436 | static void radeon_bo_clear_surface_reg(struct radeon_bo *bo) |
559 | { | 437 | { |
560 | struct radeon_device *rdev = robj->rdev; | 438 | struct radeon_device *rdev = bo->rdev; |
561 | struct radeon_surface_reg *reg; | 439 | struct radeon_surface_reg *reg; |
562 | 440 | ||
563 | if (robj->surface_reg == -1) | 441 | if (bo->surface_reg == -1) |
564 | return; | 442 | return; |
565 | 443 | ||
566 | reg = &rdev->surface_regs[robj->surface_reg]; | 444 | reg = &rdev->surface_regs[bo->surface_reg]; |
567 | radeon_clear_surface_reg(rdev, robj->surface_reg); | 445 | radeon_clear_surface_reg(rdev, bo->surface_reg); |
568 | 446 | ||
569 | reg->robj = NULL; | 447 | reg->bo = NULL; |
570 | robj->surface_reg = -1; | 448 | bo->surface_reg = -1; |
571 | } | 449 | } |
572 | 450 | ||
573 | void radeon_object_set_tiling_flags(struct radeon_object *robj, | 451 | int radeon_bo_set_tiling_flags(struct radeon_bo *bo, |
574 | uint32_t tiling_flags, uint32_t pitch) | 452 | uint32_t tiling_flags, uint32_t pitch) |
575 | { | 453 | { |
576 | robj->tiling_flags = tiling_flags; | 454 | int r; |
577 | robj->pitch = pitch; | 455 | |
456 | r = radeon_bo_reserve(bo, false); | ||
457 | if (unlikely(r != 0)) | ||
458 | return r; | ||
459 | bo->tiling_flags = tiling_flags; | ||
460 | bo->pitch = pitch; | ||
461 | radeon_bo_unreserve(bo); | ||
462 | return 0; | ||
578 | } | 463 | } |
579 | 464 | ||
580 | void radeon_object_get_tiling_flags(struct radeon_object *robj, | 465 | void radeon_bo_get_tiling_flags(struct radeon_bo *bo, |
581 | uint32_t *tiling_flags, | 466 | uint32_t *tiling_flags, |
582 | uint32_t *pitch) | 467 | uint32_t *pitch) |
583 | { | 468 | { |
469 | BUG_ON(!atomic_read(&bo->tbo.reserved)); | ||
584 | if (tiling_flags) | 470 | if (tiling_flags) |
585 | *tiling_flags = robj->tiling_flags; | 471 | *tiling_flags = bo->tiling_flags; |
586 | if (pitch) | 472 | if (pitch) |
587 | *pitch = robj->pitch; | 473 | *pitch = bo->pitch; |
588 | } | 474 | } |
589 | 475 | ||
590 | int radeon_object_check_tiling(struct radeon_object *robj, bool has_moved, | 476 | int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved, |
591 | bool force_drop) | 477 | bool force_drop) |
592 | { | 478 | { |
593 | if (!(robj->tiling_flags & RADEON_TILING_SURFACE)) | 479 | BUG_ON(!atomic_read(&bo->tbo.reserved)); |
480 | |||
481 | if (!(bo->tiling_flags & RADEON_TILING_SURFACE)) | ||
594 | return 0; | 482 | return 0; |
595 | 483 | ||
596 | if (force_drop) { | 484 | if (force_drop) { |
597 | radeon_object_clear_surface_reg(robj); | 485 | radeon_bo_clear_surface_reg(bo); |
598 | return 0; | 486 | return 0; |
599 | } | 487 | } |
600 | 488 | ||
601 | if (robj->tobj.mem.mem_type != TTM_PL_VRAM) { | 489 | if (bo->tbo.mem.mem_type != TTM_PL_VRAM) { |
602 | if (!has_moved) | 490 | if (!has_moved) |
603 | return 0; | 491 | return 0; |
604 | 492 | ||
605 | if (robj->surface_reg >= 0) | 493 | if (bo->surface_reg >= 0) |
606 | radeon_object_clear_surface_reg(robj); | 494 | radeon_bo_clear_surface_reg(bo); |
607 | return 0; | 495 | return 0; |
608 | } | 496 | } |
609 | 497 | ||
610 | if ((robj->surface_reg >= 0) && !has_moved) | 498 | if ((bo->surface_reg >= 0) && !has_moved) |
611 | return 0; | 499 | return 0; |
612 | 500 | ||
613 | return radeon_object_get_surface_reg(robj); | 501 | return radeon_bo_get_surface_reg(bo); |
614 | } | 502 | } |
615 | 503 | ||
616 | void radeon_bo_move_notify(struct ttm_buffer_object *bo, | 504 | void radeon_bo_move_notify(struct ttm_buffer_object *bo, |
617 | struct ttm_mem_reg *mem) | 505 | struct ttm_mem_reg *mem) |
618 | { | 506 | { |
619 | struct radeon_object *robj = container_of(bo, struct radeon_object, tobj); | 507 | struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo); |
620 | radeon_object_check_tiling(robj, 0, 1); | 508 | radeon_bo_check_tiling(rbo, 0, 1); |
621 | } | 509 | } |
622 | 510 | ||
623 | void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) | 511 | void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) |
624 | { | 512 | { |
625 | struct radeon_object *robj = container_of(bo, struct radeon_object, tobj); | 513 | struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo); |
626 | radeon_object_check_tiling(robj, 0, 0); | 514 | radeon_bo_check_tiling(rbo, 0, 0); |
627 | } | 515 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h index 10e8af6bb456..e9da13077e2f 100644 --- a/drivers/gpu/drm/radeon/radeon_object.h +++ b/drivers/gpu/drm/radeon/radeon_object.h | |||
@@ -28,19 +28,152 @@ | |||
28 | #ifndef __RADEON_OBJECT_H__ | 28 | #ifndef __RADEON_OBJECT_H__ |
29 | #define __RADEON_OBJECT_H__ | 29 | #define __RADEON_OBJECT_H__ |
30 | 30 | ||
31 | #include <ttm/ttm_bo_api.h> | 31 | #include <drm/radeon_drm.h> |
32 | #include <ttm/ttm_bo_driver.h> | 32 | #include "radeon.h" |
33 | #include <ttm/ttm_placement.h> | ||
34 | #include <ttm/ttm_module.h> | ||
35 | 33 | ||
36 | /* | 34 | /** |
37 | * TTM. | 35 | * radeon_mem_type_to_domain - return domain corresponding to mem_type |
36 | * @mem_type: ttm memory type | ||
37 | * | ||
38 | * Returns corresponding domain of the ttm mem_type | ||
39 | */ | ||
40 | static inline unsigned radeon_mem_type_to_domain(u32 mem_type) | ||
41 | { | ||
42 | switch (mem_type) { | ||
43 | case TTM_PL_VRAM: | ||
44 | return RADEON_GEM_DOMAIN_VRAM; | ||
45 | case TTM_PL_TT: | ||
46 | return RADEON_GEM_DOMAIN_GTT; | ||
47 | case TTM_PL_SYSTEM: | ||
48 | return RADEON_GEM_DOMAIN_CPU; | ||
49 | default: | ||
50 | break; | ||
51 | } | ||
52 | return 0; | ||
53 | } | ||
54 | |||
55 | /** | ||
56 | * radeon_bo_reserve - reserve bo | ||
57 | * @bo: bo structure | ||
58 | * @no_wait: don't sleep while trying to reserve (return -EBUSY) | ||
59 | * | ||
60 | * Returns: | ||
61 | * -EBUSY: buffer is busy and @no_wait is true | ||
62 | * -ERESTART: A wait for the buffer to become unreserved was interrupted by | ||
63 | * a signal. Release all buffer reservations and return to user-space. | ||
64 | */ | ||
65 | static inline int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait) | ||
66 | { | ||
67 | int r; | ||
68 | |||
69 | retry: | ||
70 | r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0); | ||
71 | if (unlikely(r != 0)) { | ||
72 | if (r == -ERESTART) | ||
73 | goto retry; | ||
74 | dev_err(bo->rdev->dev, "%p reserve failed\n", bo); | ||
75 | return r; | ||
76 | } | ||
77 | return 0; | ||
78 | } | ||
79 | |||
80 | static inline void radeon_bo_unreserve(struct radeon_bo *bo) | ||
81 | { | ||
82 | ttm_bo_unreserve(&bo->tbo); | ||
83 | } | ||
84 | |||
85 | /** | ||
86 | * radeon_bo_gpu_offset - return GPU offset of bo | ||
87 | * @bo: radeon object for which we query the offset | ||
88 | * | ||
89 | * Returns current GPU offset of the object. | ||
90 | * | ||
91 | * Note: object should either be pinned or reserved when calling this | ||
92 | * function, it might be usefull to add check for this for debugging. | ||
93 | */ | ||
94 | static inline u64 radeon_bo_gpu_offset(struct radeon_bo *bo) | ||
95 | { | ||
96 | return bo->tbo.offset; | ||
97 | } | ||
98 | |||
99 | static inline unsigned long radeon_bo_size(struct radeon_bo *bo) | ||
100 | { | ||
101 | return bo->tbo.num_pages << PAGE_SHIFT; | ||
102 | } | ||
103 | |||
104 | static inline bool radeon_bo_is_reserved(struct radeon_bo *bo) | ||
105 | { | ||
106 | return !!atomic_read(&bo->tbo.reserved); | ||
107 | } | ||
108 | |||
109 | /** | ||
110 | * radeon_bo_mmap_offset - return mmap offset of bo | ||
111 | * @bo: radeon object for which we query the offset | ||
112 | * | ||
113 | * Returns mmap offset of the object. | ||
114 | * | ||
115 | * Note: addr_space_offset is constant after ttm bo init thus isn't protected | ||
116 | * by any lock. | ||
38 | */ | 117 | */ |
39 | struct radeon_mman { | 118 | static inline u64 radeon_bo_mmap_offset(struct radeon_bo *bo) |
40 | struct ttm_bo_global_ref bo_global_ref; | 119 | { |
41 | struct ttm_global_reference mem_global_ref; | 120 | return bo->tbo.addr_space_offset; |
42 | bool mem_global_referenced; | 121 | } |
43 | struct ttm_bo_device bdev; | 122 | |
44 | }; | 123 | static inline int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, |
124 | bool no_wait) | ||
125 | { | ||
126 | int r; | ||
127 | |||
128 | retry: | ||
129 | r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0); | ||
130 | if (unlikely(r != 0)) { | ||
131 | if (r == -ERESTART) | ||
132 | goto retry; | ||
133 | dev_err(bo->rdev->dev, "%p reserve failed for wait\n", bo); | ||
134 | return r; | ||
135 | } | ||
136 | spin_lock(&bo->tbo.lock); | ||
137 | if (mem_type) | ||
138 | *mem_type = bo->tbo.mem.mem_type; | ||
139 | if (bo->tbo.sync_obj) | ||
140 | r = ttm_bo_wait(&bo->tbo, true, true, no_wait); | ||
141 | spin_unlock(&bo->tbo.lock); | ||
142 | ttm_bo_unreserve(&bo->tbo); | ||
143 | if (unlikely(r == -ERESTART)) | ||
144 | goto retry; | ||
145 | return r; | ||
146 | } | ||
147 | |||
148 | extern int radeon_bo_create(struct radeon_device *rdev, | ||
149 | struct drm_gem_object *gobj, unsigned long size, | ||
150 | bool kernel, u32 domain, | ||
151 | struct radeon_bo **bo_ptr); | ||
152 | extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr); | ||
153 | extern void radeon_bo_kunmap(struct radeon_bo *bo); | ||
154 | extern void radeon_bo_unref(struct radeon_bo **bo); | ||
155 | extern int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr); | ||
156 | extern int radeon_bo_unpin(struct radeon_bo *bo); | ||
157 | extern int radeon_bo_evict_vram(struct radeon_device *rdev); | ||
158 | extern void radeon_bo_force_delete(struct radeon_device *rdev); | ||
159 | extern int radeon_bo_init(struct radeon_device *rdev); | ||
160 | extern void radeon_bo_fini(struct radeon_device *rdev); | ||
161 | extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj, | ||
162 | struct list_head *head); | ||
163 | extern int radeon_bo_list_reserve(struct list_head *head); | ||
164 | extern void radeon_bo_list_unreserve(struct list_head *head); | ||
165 | extern int radeon_bo_list_validate(struct list_head *head, void *fence); | ||
166 | extern void radeon_bo_list_unvalidate(struct list_head *head, void *fence); | ||
167 | extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo, | ||
168 | struct vm_area_struct *vma); | ||
169 | extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo, | ||
170 | u32 tiling_flags, u32 pitch); | ||
171 | extern void radeon_bo_get_tiling_flags(struct radeon_bo *bo, | ||
172 | u32 *tiling_flags, u32 *pitch); | ||
173 | extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved, | ||
174 | bool force_drop); | ||
175 | extern void radeon_bo_move_notify(struct ttm_buffer_object *bo, | ||
176 | struct ttm_mem_reg *mem); | ||
177 | extern void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo); | ||
45 | 178 | ||
46 | #endif | 179 | #endif |
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 46146c6a2a06..34b08d307c81 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c | |||
@@ -27,7 +27,7 @@ int radeon_debugfs_pm_init(struct radeon_device *rdev); | |||
27 | int radeon_pm_init(struct radeon_device *rdev) | 27 | int radeon_pm_init(struct radeon_device *rdev) |
28 | { | 28 | { |
29 | if (radeon_debugfs_pm_init(rdev)) { | 29 | if (radeon_debugfs_pm_init(rdev)) { |
30 | DRM_ERROR("Failed to register debugfs file for CP !\n"); | 30 | DRM_ERROR("Failed to register debugfs file for PM!\n"); |
31 | } | 31 | } |
32 | 32 | ||
33 | return 0; | 33 | return 0; |
@@ -44,8 +44,8 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data) | |||
44 | struct drm_device *dev = node->minor->dev; | 44 | struct drm_device *dev = node->minor->dev; |
45 | struct radeon_device *rdev = dev->dev_private; | 45 | struct radeon_device *rdev = dev->dev_private; |
46 | 46 | ||
47 | seq_printf(m, "engine clock: %u0 Hz\n", radeon_get_engine_clock(rdev)); | 47 | seq_printf(m, "engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev)); |
48 | seq_printf(m, "memory clock: %u0 Hz\n", radeon_get_memory_clock(rdev)); | 48 | seq_printf(m, "memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev)); |
49 | 49 | ||
50 | return 0; | 50 | return 0; |
51 | } | 51 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h index 29ab75903ec1..6d0a009dd4a1 100644 --- a/drivers/gpu/drm/radeon/radeon_reg.h +++ b/drivers/gpu/drm/radeon/radeon_reg.h | |||
@@ -887,6 +887,7 @@ | |||
887 | # define RADEON_FP_PANEL_FORMAT (1 << 3) | 887 | # define RADEON_FP_PANEL_FORMAT (1 << 3) |
888 | # define RADEON_FP_EN_TMDS (1 << 7) | 888 | # define RADEON_FP_EN_TMDS (1 << 7) |
889 | # define RADEON_FP_DETECT_SENSE (1 << 8) | 889 | # define RADEON_FP_DETECT_SENSE (1 << 8) |
890 | # define RADEON_FP_DETECT_INT_POL (1 << 9) | ||
890 | # define R200_FP_SOURCE_SEL_MASK (3 << 10) | 891 | # define R200_FP_SOURCE_SEL_MASK (3 << 10) |
891 | # define R200_FP_SOURCE_SEL_CRTC1 (0 << 10) | 892 | # define R200_FP_SOURCE_SEL_CRTC1 (0 << 10) |
892 | # define R200_FP_SOURCE_SEL_CRTC2 (1 << 10) | 893 | # define R200_FP_SOURCE_SEL_CRTC2 (1 << 10) |
@@ -894,6 +895,7 @@ | |||
894 | # define R200_FP_SOURCE_SEL_TRANS (3 << 10) | 895 | # define R200_FP_SOURCE_SEL_TRANS (3 << 10) |
895 | # define RADEON_FP_SEL_CRTC1 (0 << 13) | 896 | # define RADEON_FP_SEL_CRTC1 (0 << 13) |
896 | # define RADEON_FP_SEL_CRTC2 (1 << 13) | 897 | # define RADEON_FP_SEL_CRTC2 (1 << 13) |
898 | # define R300_HPD_SEL(x) ((x) << 13) | ||
897 | # define RADEON_FP_CRTC_DONT_SHADOW_HPAR (1 << 15) | 899 | # define RADEON_FP_CRTC_DONT_SHADOW_HPAR (1 << 15) |
898 | # define RADEON_FP_CRTC_DONT_SHADOW_VPAR (1 << 16) | 900 | # define RADEON_FP_CRTC_DONT_SHADOW_VPAR (1 << 16) |
899 | # define RADEON_FP_CRTC_DONT_SHADOW_HEND (1 << 17) | 901 | # define RADEON_FP_CRTC_DONT_SHADOW_HEND (1 << 17) |
@@ -909,6 +911,7 @@ | |||
909 | # define RADEON_FP2_ON (1 << 2) | 911 | # define RADEON_FP2_ON (1 << 2) |
910 | # define RADEON_FP2_PANEL_FORMAT (1 << 3) | 912 | # define RADEON_FP2_PANEL_FORMAT (1 << 3) |
911 | # define RADEON_FP2_DETECT_SENSE (1 << 8) | 913 | # define RADEON_FP2_DETECT_SENSE (1 << 8) |
914 | # define RADEON_FP2_DETECT_INT_POL (1 << 9) | ||
912 | # define R200_FP2_SOURCE_SEL_MASK (3 << 10) | 915 | # define R200_FP2_SOURCE_SEL_MASK (3 << 10) |
913 | # define R200_FP2_SOURCE_SEL_CRTC1 (0 << 10) | 916 | # define R200_FP2_SOURCE_SEL_CRTC1 (0 << 10) |
914 | # define R200_FP2_SOURCE_SEL_CRTC2 (1 << 10) | 917 | # define R200_FP2_SOURCE_SEL_CRTC2 (1 << 10) |
@@ -988,14 +991,20 @@ | |||
988 | 991 | ||
989 | #define RADEON_GEN_INT_CNTL 0x0040 | 992 | #define RADEON_GEN_INT_CNTL 0x0040 |
990 | # define RADEON_CRTC_VBLANK_MASK (1 << 0) | 993 | # define RADEON_CRTC_VBLANK_MASK (1 << 0) |
994 | # define RADEON_FP_DETECT_MASK (1 << 4) | ||
991 | # define RADEON_CRTC2_VBLANK_MASK (1 << 9) | 995 | # define RADEON_CRTC2_VBLANK_MASK (1 << 9) |
996 | # define RADEON_FP2_DETECT_MASK (1 << 10) | ||
992 | # define RADEON_SW_INT_ENABLE (1 << 25) | 997 | # define RADEON_SW_INT_ENABLE (1 << 25) |
993 | #define RADEON_GEN_INT_STATUS 0x0044 | 998 | #define RADEON_GEN_INT_STATUS 0x0044 |
994 | # define AVIVO_DISPLAY_INT_STATUS (1 << 0) | 999 | # define AVIVO_DISPLAY_INT_STATUS (1 << 0) |
995 | # define RADEON_CRTC_VBLANK_STAT (1 << 0) | 1000 | # define RADEON_CRTC_VBLANK_STAT (1 << 0) |
996 | # define RADEON_CRTC_VBLANK_STAT_ACK (1 << 0) | 1001 | # define RADEON_CRTC_VBLANK_STAT_ACK (1 << 0) |
1002 | # define RADEON_FP_DETECT_STAT (1 << 4) | ||
1003 | # define RADEON_FP_DETECT_STAT_ACK (1 << 4) | ||
997 | # define RADEON_CRTC2_VBLANK_STAT (1 << 9) | 1004 | # define RADEON_CRTC2_VBLANK_STAT (1 << 9) |
998 | # define RADEON_CRTC2_VBLANK_STAT_ACK (1 << 9) | 1005 | # define RADEON_CRTC2_VBLANK_STAT_ACK (1 << 9) |
1006 | # define RADEON_FP2_DETECT_STAT (1 << 10) | ||
1007 | # define RADEON_FP2_DETECT_STAT_ACK (1 << 10) | ||
999 | # define RADEON_SW_INT_FIRE (1 << 26) | 1008 | # define RADEON_SW_INT_FIRE (1 << 26) |
1000 | # define RADEON_SW_INT_TEST (1 << 25) | 1009 | # define RADEON_SW_INT_TEST (1 << 25) |
1001 | # define RADEON_SW_INT_TEST_ACK (1 << 25) | 1010 | # define RADEON_SW_INT_TEST_ACK (1 << 25) |
@@ -1051,20 +1060,25 @@ | |||
1051 | 1060 | ||
1052 | /* Multimedia I2C bus */ | 1061 | /* Multimedia I2C bus */ |
1053 | #define RADEON_I2C_CNTL_0 0x0090 | 1062 | #define RADEON_I2C_CNTL_0 0x0090 |
1054 | #define RADEON_I2C_DONE (1<<0) | 1063 | #define RADEON_I2C_DONE (1 << 0) |
1055 | #define RADEON_I2C_NACK (1<<1) | 1064 | #define RADEON_I2C_NACK (1 << 1) |
1056 | #define RADEON_I2C_HALT (1<<2) | 1065 | #define RADEON_I2C_HALT (1 << 2) |
1057 | #define RADEON_I2C_SOFT_RST (1<<5) | 1066 | #define RADEON_I2C_SOFT_RST (1 << 5) |
1058 | #define RADEON_I2C_DRIVE_EN (1<<6) | 1067 | #define RADEON_I2C_DRIVE_EN (1 << 6) |
1059 | #define RADEON_I2C_DRIVE_SEL (1<<7) | 1068 | #define RADEON_I2C_DRIVE_SEL (1 << 7) |
1060 | #define RADEON_I2C_START (1<<8) | 1069 | #define RADEON_I2C_START (1 << 8) |
1061 | #define RADEON_I2C_STOP (1<<9) | 1070 | #define RADEON_I2C_STOP (1 << 9) |
1062 | #define RADEON_I2C_RECEIVE (1<<10) | 1071 | #define RADEON_I2C_RECEIVE (1 << 10) |
1063 | #define RADEON_I2C_ABORT (1<<11) | 1072 | #define RADEON_I2C_ABORT (1 << 11) |
1064 | #define RADEON_I2C_GO (1<<12) | 1073 | #define RADEON_I2C_GO (1 << 12) |
1074 | #define RADEON_I2C_PRESCALE_SHIFT 16 | ||
1065 | #define RADEON_I2C_CNTL_1 0x0094 | 1075 | #define RADEON_I2C_CNTL_1 0x0094 |
1066 | #define RADEON_I2C_SEL (1<<16) | 1076 | #define RADEON_I2C_DATA_COUNT_SHIFT 0 |
1067 | #define RADEON_I2C_EN (1<<17) | 1077 | #define RADEON_I2C_ADDR_COUNT_SHIFT 4 |
1078 | #define RADEON_I2C_INTRA_BYTE_DELAY_SHIFT 8 | ||
1079 | #define RADEON_I2C_SEL (1 << 16) | ||
1080 | #define RADEON_I2C_EN (1 << 17) | ||
1081 | #define RADEON_I2C_TIME_LIMIT_SHIFT 24 | ||
1068 | #define RADEON_I2C_DATA 0x0098 | 1082 | #define RADEON_I2C_DATA 0x0098 |
1069 | 1083 | ||
1070 | #define RADEON_DVI_I2C_CNTL_0 0x02e0 | 1084 | #define RADEON_DVI_I2C_CNTL_0 0x02e0 |
@@ -1072,7 +1086,7 @@ | |||
1072 | # define R200_SEL_DDC1 0 /* 0x60 - VGA_DDC */ | 1086 | # define R200_SEL_DDC1 0 /* 0x60 - VGA_DDC */ |
1073 | # define R200_SEL_DDC2 1 /* 0x64 - DVI_DDC */ | 1087 | # define R200_SEL_DDC2 1 /* 0x64 - DVI_DDC */ |
1074 | # define R200_SEL_DDC3 2 /* 0x68 - MONID_DDC */ | 1088 | # define R200_SEL_DDC3 2 /* 0x68 - MONID_DDC */ |
1075 | #define RADEON_DVI_I2C_CNTL_1 0x02e4 /* ? */ | 1089 | #define RADEON_DVI_I2C_CNTL_1 0x02e4 |
1076 | #define RADEON_DVI_I2C_DATA 0x02e8 | 1090 | #define RADEON_DVI_I2C_DATA 0x02e8 |
1077 | 1091 | ||
1078 | #define RADEON_INTERRUPT_LINE 0x0f3c /* PCI */ | 1092 | #define RADEON_INTERRUPT_LINE 0x0f3c /* PCI */ |
@@ -1143,15 +1157,16 @@ | |||
1143 | # define RADEON_IO_MCLK_MAX_DYN_STOP_LAT (1 << 13) | 1157 | # define RADEON_IO_MCLK_MAX_DYN_STOP_LAT (1 << 13) |
1144 | # define RADEON_MC_MCLK_DYN_ENABLE (1 << 14) | 1158 | # define RADEON_MC_MCLK_DYN_ENABLE (1 << 14) |
1145 | # define RADEON_IO_MCLK_DYN_ENABLE (1 << 15) | 1159 | # define RADEON_IO_MCLK_DYN_ENABLE (1 << 15) |
1146 | #define RADEON_LCD_GPIO_MASK 0x01a0 | 1160 | |
1147 | #define RADEON_GPIOPAD_EN 0x01a0 | ||
1148 | #define RADEON_LCD_GPIO_Y_REG 0x01a4 | ||
1149 | #define RADEON_MDGPIO_A_REG 0x01ac | ||
1150 | #define RADEON_MDGPIO_EN_REG 0x01b0 | ||
1151 | #define RADEON_MDGPIO_MASK 0x0198 | ||
1152 | #define RADEON_GPIOPAD_MASK 0x0198 | 1161 | #define RADEON_GPIOPAD_MASK 0x0198 |
1153 | #define RADEON_GPIOPAD_A 0x019c | 1162 | #define RADEON_GPIOPAD_A 0x019c |
1154 | #define RADEON_MDGPIO_Y_REG 0x01b4 | 1163 | #define RADEON_GPIOPAD_EN 0x01a0 |
1164 | #define RADEON_GPIOPAD_Y 0x01a4 | ||
1165 | #define RADEON_MDGPIO_MASK 0x01a8 | ||
1166 | #define RADEON_MDGPIO_A 0x01ac | ||
1167 | #define RADEON_MDGPIO_EN 0x01b0 | ||
1168 | #define RADEON_MDGPIO_Y 0x01b4 | ||
1169 | |||
1155 | #define RADEON_MEM_ADDR_CONFIG 0x0148 | 1170 | #define RADEON_MEM_ADDR_CONFIG 0x0148 |
1156 | #define RADEON_MEM_BASE 0x0f10 /* PCI */ | 1171 | #define RADEON_MEM_BASE 0x0f10 /* PCI */ |
1157 | #define RADEON_MEM_CNTL 0x0140 | 1172 | #define RADEON_MEM_CNTL 0x0140 |
@@ -1360,6 +1375,9 @@ | |||
1360 | #define RADEON_OVR_CLR 0x0230 | 1375 | #define RADEON_OVR_CLR 0x0230 |
1361 | #define RADEON_OVR_WID_LEFT_RIGHT 0x0234 | 1376 | #define RADEON_OVR_WID_LEFT_RIGHT 0x0234 |
1362 | #define RADEON_OVR_WID_TOP_BOTTOM 0x0238 | 1377 | #define RADEON_OVR_WID_TOP_BOTTOM 0x0238 |
1378 | #define RADEON_OVR2_CLR 0x0330 | ||
1379 | #define RADEON_OVR2_WID_LEFT_RIGHT 0x0334 | ||
1380 | #define RADEON_OVR2_WID_TOP_BOTTOM 0x0338 | ||
1363 | 1381 | ||
1364 | /* first capture unit */ | 1382 | /* first capture unit */ |
1365 | 1383 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index 747b4bffb84b..4d12b2d17b4d 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c | |||
@@ -165,19 +165,24 @@ int radeon_ib_pool_init(struct radeon_device *rdev) | |||
165 | return 0; | 165 | return 0; |
166 | /* Allocate 1M object buffer */ | 166 | /* Allocate 1M object buffer */ |
167 | INIT_LIST_HEAD(&rdev->ib_pool.scheduled_ibs); | 167 | INIT_LIST_HEAD(&rdev->ib_pool.scheduled_ibs); |
168 | r = radeon_object_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024, | 168 | r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024, |
169 | true, RADEON_GEM_DOMAIN_GTT, | 169 | true, RADEON_GEM_DOMAIN_GTT, |
170 | false, &rdev->ib_pool.robj); | 170 | &rdev->ib_pool.robj); |
171 | if (r) { | 171 | if (r) { |
172 | DRM_ERROR("radeon: failed to ib pool (%d).\n", r); | 172 | DRM_ERROR("radeon: failed to ib pool (%d).\n", r); |
173 | return r; | 173 | return r; |
174 | } | 174 | } |
175 | r = radeon_object_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr); | 175 | r = radeon_bo_reserve(rdev->ib_pool.robj, false); |
176 | if (unlikely(r != 0)) | ||
177 | return r; | ||
178 | r = radeon_bo_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr); | ||
176 | if (r) { | 179 | if (r) { |
180 | radeon_bo_unreserve(rdev->ib_pool.robj); | ||
177 | DRM_ERROR("radeon: failed to pin ib pool (%d).\n", r); | 181 | DRM_ERROR("radeon: failed to pin ib pool (%d).\n", r); |
178 | return r; | 182 | return r; |
179 | } | 183 | } |
180 | r = radeon_object_kmap(rdev->ib_pool.robj, &ptr); | 184 | r = radeon_bo_kmap(rdev->ib_pool.robj, &ptr); |
185 | radeon_bo_unreserve(rdev->ib_pool.robj); | ||
181 | if (r) { | 186 | if (r) { |
182 | DRM_ERROR("radeon: failed to map ib poll (%d).\n", r); | 187 | DRM_ERROR("radeon: failed to map ib poll (%d).\n", r); |
183 | return r; | 188 | return r; |
@@ -203,14 +208,21 @@ int radeon_ib_pool_init(struct radeon_device *rdev) | |||
203 | 208 | ||
204 | void radeon_ib_pool_fini(struct radeon_device *rdev) | 209 | void radeon_ib_pool_fini(struct radeon_device *rdev) |
205 | { | 210 | { |
211 | int r; | ||
212 | |||
206 | if (!rdev->ib_pool.ready) { | 213 | if (!rdev->ib_pool.ready) { |
207 | return; | 214 | return; |
208 | } | 215 | } |
209 | mutex_lock(&rdev->ib_pool.mutex); | 216 | mutex_lock(&rdev->ib_pool.mutex); |
210 | bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE); | 217 | bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE); |
211 | if (rdev->ib_pool.robj) { | 218 | if (rdev->ib_pool.robj) { |
212 | radeon_object_kunmap(rdev->ib_pool.robj); | 219 | r = radeon_bo_reserve(rdev->ib_pool.robj, false); |
213 | radeon_object_unref(&rdev->ib_pool.robj); | 220 | if (likely(r == 0)) { |
221 | radeon_bo_kunmap(rdev->ib_pool.robj); | ||
222 | radeon_bo_unpin(rdev->ib_pool.robj); | ||
223 | radeon_bo_unreserve(rdev->ib_pool.robj); | ||
224 | } | ||
225 | radeon_bo_unref(&rdev->ib_pool.robj); | ||
214 | rdev->ib_pool.robj = NULL; | 226 | rdev->ib_pool.robj = NULL; |
215 | } | 227 | } |
216 | mutex_unlock(&rdev->ib_pool.mutex); | 228 | mutex_unlock(&rdev->ib_pool.mutex); |
@@ -288,29 +300,28 @@ int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size) | |||
288 | rdev->cp.ring_size = ring_size; | 300 | rdev->cp.ring_size = ring_size; |
289 | /* Allocate ring buffer */ | 301 | /* Allocate ring buffer */ |
290 | if (rdev->cp.ring_obj == NULL) { | 302 | if (rdev->cp.ring_obj == NULL) { |
291 | r = radeon_object_create(rdev, NULL, rdev->cp.ring_size, | 303 | r = radeon_bo_create(rdev, NULL, rdev->cp.ring_size, true, |
292 | true, | 304 | RADEON_GEM_DOMAIN_GTT, |
293 | RADEON_GEM_DOMAIN_GTT, | 305 | &rdev->cp.ring_obj); |
294 | false, | ||
295 | &rdev->cp.ring_obj); | ||
296 | if (r) { | 306 | if (r) { |
297 | DRM_ERROR("radeon: failed to create ring buffer (%d).\n", r); | 307 | dev_err(rdev->dev, "(%d) ring create failed\n", r); |
298 | mutex_unlock(&rdev->cp.mutex); | ||
299 | return r; | 308 | return r; |
300 | } | 309 | } |
301 | r = radeon_object_pin(rdev->cp.ring_obj, | 310 | r = radeon_bo_reserve(rdev->cp.ring_obj, false); |
302 | RADEON_GEM_DOMAIN_GTT, | 311 | if (unlikely(r != 0)) |
303 | &rdev->cp.gpu_addr); | 312 | return r; |
313 | r = radeon_bo_pin(rdev->cp.ring_obj, RADEON_GEM_DOMAIN_GTT, | ||
314 | &rdev->cp.gpu_addr); | ||
304 | if (r) { | 315 | if (r) { |
305 | DRM_ERROR("radeon: failed to pin ring buffer (%d).\n", r); | 316 | radeon_bo_unreserve(rdev->cp.ring_obj); |
306 | mutex_unlock(&rdev->cp.mutex); | 317 | dev_err(rdev->dev, "(%d) ring pin failed\n", r); |
307 | return r; | 318 | return r; |
308 | } | 319 | } |
309 | r = radeon_object_kmap(rdev->cp.ring_obj, | 320 | r = radeon_bo_kmap(rdev->cp.ring_obj, |
310 | (void **)&rdev->cp.ring); | 321 | (void **)&rdev->cp.ring); |
322 | radeon_bo_unreserve(rdev->cp.ring_obj); | ||
311 | if (r) { | 323 | if (r) { |
312 | DRM_ERROR("radeon: failed to map ring buffer (%d).\n", r); | 324 | dev_err(rdev->dev, "(%d) ring map failed\n", r); |
313 | mutex_unlock(&rdev->cp.mutex); | ||
314 | return r; | 325 | return r; |
315 | } | 326 | } |
316 | } | 327 | } |
@@ -321,11 +332,17 @@ int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size) | |||
321 | 332 | ||
322 | void radeon_ring_fini(struct radeon_device *rdev) | 333 | void radeon_ring_fini(struct radeon_device *rdev) |
323 | { | 334 | { |
335 | int r; | ||
336 | |||
324 | mutex_lock(&rdev->cp.mutex); | 337 | mutex_lock(&rdev->cp.mutex); |
325 | if (rdev->cp.ring_obj) { | 338 | if (rdev->cp.ring_obj) { |
326 | radeon_object_kunmap(rdev->cp.ring_obj); | 339 | r = radeon_bo_reserve(rdev->cp.ring_obj, false); |
327 | radeon_object_unpin(rdev->cp.ring_obj); | 340 | if (likely(r == 0)) { |
328 | radeon_object_unref(&rdev->cp.ring_obj); | 341 | radeon_bo_kunmap(rdev->cp.ring_obj); |
342 | radeon_bo_unpin(rdev->cp.ring_obj); | ||
343 | radeon_bo_unreserve(rdev->cp.ring_obj); | ||
344 | } | ||
345 | radeon_bo_unref(&rdev->cp.ring_obj); | ||
329 | rdev->cp.ring = NULL; | 346 | rdev->cp.ring = NULL; |
330 | rdev->cp.ring_obj = NULL; | 347 | rdev->cp.ring_obj = NULL; |
331 | } | 348 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c index f8a465d9a1cf..391c973ec4db 100644 --- a/drivers/gpu/drm/radeon/radeon_test.c +++ b/drivers/gpu/drm/radeon/radeon_test.c | |||
@@ -30,8 +30,8 @@ | |||
30 | /* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */ | 30 | /* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */ |
31 | void radeon_test_moves(struct radeon_device *rdev) | 31 | void radeon_test_moves(struct radeon_device *rdev) |
32 | { | 32 | { |
33 | struct radeon_object *vram_obj = NULL; | 33 | struct radeon_bo *vram_obj = NULL; |
34 | struct radeon_object **gtt_obj = NULL; | 34 | struct radeon_bo **gtt_obj = NULL; |
35 | struct radeon_fence *fence = NULL; | 35 | struct radeon_fence *fence = NULL; |
36 | uint64_t gtt_addr, vram_addr; | 36 | uint64_t gtt_addr, vram_addr; |
37 | unsigned i, n, size; | 37 | unsigned i, n, size; |
@@ -52,38 +52,42 @@ void radeon_test_moves(struct radeon_device *rdev) | |||
52 | goto out_cleanup; | 52 | goto out_cleanup; |
53 | } | 53 | } |
54 | 54 | ||
55 | r = radeon_object_create(rdev, NULL, size, true, RADEON_GEM_DOMAIN_VRAM, | 55 | r = radeon_bo_create(rdev, NULL, size, true, RADEON_GEM_DOMAIN_VRAM, |
56 | false, &vram_obj); | 56 | &vram_obj); |
57 | if (r) { | 57 | if (r) { |
58 | DRM_ERROR("Failed to create VRAM object\n"); | 58 | DRM_ERROR("Failed to create VRAM object\n"); |
59 | goto out_cleanup; | 59 | goto out_cleanup; |
60 | } | 60 | } |
61 | 61 | r = radeon_bo_reserve(vram_obj, false); | |
62 | r = radeon_object_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr); | 62 | if (unlikely(r != 0)) |
63 | goto out_cleanup; | ||
64 | r = radeon_bo_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr); | ||
63 | if (r) { | 65 | if (r) { |
64 | DRM_ERROR("Failed to pin VRAM object\n"); | 66 | DRM_ERROR("Failed to pin VRAM object\n"); |
65 | goto out_cleanup; | 67 | goto out_cleanup; |
66 | } | 68 | } |
67 | |||
68 | for (i = 0; i < n; i++) { | 69 | for (i = 0; i < n; i++) { |
69 | void *gtt_map, *vram_map; | 70 | void *gtt_map, *vram_map; |
70 | void **gtt_start, **gtt_end; | 71 | void **gtt_start, **gtt_end; |
71 | void **vram_start, **vram_end; | 72 | void **vram_start, **vram_end; |
72 | 73 | ||
73 | r = radeon_object_create(rdev, NULL, size, true, | 74 | r = radeon_bo_create(rdev, NULL, size, true, |
74 | RADEON_GEM_DOMAIN_GTT, false, gtt_obj + i); | 75 | RADEON_GEM_DOMAIN_GTT, gtt_obj + i); |
75 | if (r) { | 76 | if (r) { |
76 | DRM_ERROR("Failed to create GTT object %d\n", i); | 77 | DRM_ERROR("Failed to create GTT object %d\n", i); |
77 | goto out_cleanup; | 78 | goto out_cleanup; |
78 | } | 79 | } |
79 | 80 | ||
80 | r = radeon_object_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, >t_addr); | 81 | r = radeon_bo_reserve(gtt_obj[i], false); |
82 | if (unlikely(r != 0)) | ||
83 | goto out_cleanup; | ||
84 | r = radeon_bo_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, >t_addr); | ||
81 | if (r) { | 85 | if (r) { |
82 | DRM_ERROR("Failed to pin GTT object %d\n", i); | 86 | DRM_ERROR("Failed to pin GTT object %d\n", i); |
83 | goto out_cleanup; | 87 | goto out_cleanup; |
84 | } | 88 | } |
85 | 89 | ||
86 | r = radeon_object_kmap(gtt_obj[i], >t_map); | 90 | r = radeon_bo_kmap(gtt_obj[i], >t_map); |
87 | if (r) { | 91 | if (r) { |
88 | DRM_ERROR("Failed to map GTT object %d\n", i); | 92 | DRM_ERROR("Failed to map GTT object %d\n", i); |
89 | goto out_cleanup; | 93 | goto out_cleanup; |
@@ -94,7 +98,7 @@ void radeon_test_moves(struct radeon_device *rdev) | |||
94 | gtt_start++) | 98 | gtt_start++) |
95 | *gtt_start = gtt_start; | 99 | *gtt_start = gtt_start; |
96 | 100 | ||
97 | radeon_object_kunmap(gtt_obj[i]); | 101 | radeon_bo_kunmap(gtt_obj[i]); |
98 | 102 | ||
99 | r = radeon_fence_create(rdev, &fence); | 103 | r = radeon_fence_create(rdev, &fence); |
100 | if (r) { | 104 | if (r) { |
@@ -116,7 +120,7 @@ void radeon_test_moves(struct radeon_device *rdev) | |||
116 | 120 | ||
117 | radeon_fence_unref(&fence); | 121 | radeon_fence_unref(&fence); |
118 | 122 | ||
119 | r = radeon_object_kmap(vram_obj, &vram_map); | 123 | r = radeon_bo_kmap(vram_obj, &vram_map); |
120 | if (r) { | 124 | if (r) { |
121 | DRM_ERROR("Failed to map VRAM object after copy %d\n", i); | 125 | DRM_ERROR("Failed to map VRAM object after copy %d\n", i); |
122 | goto out_cleanup; | 126 | goto out_cleanup; |
@@ -131,13 +135,13 @@ void radeon_test_moves(struct radeon_device *rdev) | |||
131 | "expected 0x%p (GTT map 0x%p-0x%p)\n", | 135 | "expected 0x%p (GTT map 0x%p-0x%p)\n", |
132 | i, *vram_start, gtt_start, gtt_map, | 136 | i, *vram_start, gtt_start, gtt_map, |
133 | gtt_end); | 137 | gtt_end); |
134 | radeon_object_kunmap(vram_obj); | 138 | radeon_bo_kunmap(vram_obj); |
135 | goto out_cleanup; | 139 | goto out_cleanup; |
136 | } | 140 | } |
137 | *vram_start = vram_start; | 141 | *vram_start = vram_start; |
138 | } | 142 | } |
139 | 143 | ||
140 | radeon_object_kunmap(vram_obj); | 144 | radeon_bo_kunmap(vram_obj); |
141 | 145 | ||
142 | r = radeon_fence_create(rdev, &fence); | 146 | r = radeon_fence_create(rdev, &fence); |
143 | if (r) { | 147 | if (r) { |
@@ -159,7 +163,7 @@ void radeon_test_moves(struct radeon_device *rdev) | |||
159 | 163 | ||
160 | radeon_fence_unref(&fence); | 164 | radeon_fence_unref(&fence); |
161 | 165 | ||
162 | r = radeon_object_kmap(gtt_obj[i], >t_map); | 166 | r = radeon_bo_kmap(gtt_obj[i], >t_map); |
163 | if (r) { | 167 | if (r) { |
164 | DRM_ERROR("Failed to map GTT object after copy %d\n", i); | 168 | DRM_ERROR("Failed to map GTT object after copy %d\n", i); |
165 | goto out_cleanup; | 169 | goto out_cleanup; |
@@ -174,12 +178,12 @@ void radeon_test_moves(struct radeon_device *rdev) | |||
174 | "expected 0x%p (VRAM map 0x%p-0x%p)\n", | 178 | "expected 0x%p (VRAM map 0x%p-0x%p)\n", |
175 | i, *gtt_start, vram_start, vram_map, | 179 | i, *gtt_start, vram_start, vram_map, |
176 | vram_end); | 180 | vram_end); |
177 | radeon_object_kunmap(gtt_obj[i]); | 181 | radeon_bo_kunmap(gtt_obj[i]); |
178 | goto out_cleanup; | 182 | goto out_cleanup; |
179 | } | 183 | } |
180 | } | 184 | } |
181 | 185 | ||
182 | radeon_object_kunmap(gtt_obj[i]); | 186 | radeon_bo_kunmap(gtt_obj[i]); |
183 | 187 | ||
184 | DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n", | 188 | DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n", |
185 | gtt_addr - rdev->mc.gtt_location); | 189 | gtt_addr - rdev->mc.gtt_location); |
@@ -187,14 +191,20 @@ void radeon_test_moves(struct radeon_device *rdev) | |||
187 | 191 | ||
188 | out_cleanup: | 192 | out_cleanup: |
189 | if (vram_obj) { | 193 | if (vram_obj) { |
190 | radeon_object_unpin(vram_obj); | 194 | if (radeon_bo_is_reserved(vram_obj)) { |
191 | radeon_object_unref(&vram_obj); | 195 | radeon_bo_unpin(vram_obj); |
196 | radeon_bo_unreserve(vram_obj); | ||
197 | } | ||
198 | radeon_bo_unref(&vram_obj); | ||
192 | } | 199 | } |
193 | if (gtt_obj) { | 200 | if (gtt_obj) { |
194 | for (i = 0; i < n; i++) { | 201 | for (i = 0; i < n; i++) { |
195 | if (gtt_obj[i]) { | 202 | if (gtt_obj[i]) { |
196 | radeon_object_unpin(gtt_obj[i]); | 203 | if (radeon_bo_is_reserved(gtt_obj[i])) { |
197 | radeon_object_unref(>t_obj[i]); | 204 | radeon_bo_unpin(gtt_obj[i]); |
205 | radeon_bo_unreserve(gtt_obj[i]); | ||
206 | } | ||
207 | radeon_bo_unref(>t_obj[i]); | ||
198 | } | 208 | } |
199 | } | 209 | } |
200 | kfree(gtt_obj); | 210 | kfree(gtt_obj); |
@@ -206,4 +216,3 @@ out_cleanup: | |||
206 | printk(KERN_WARNING "Error while testing BO move.\n"); | 216 | printk(KERN_WARNING "Error while testing BO move.\n"); |
207 | } | 217 | } |
208 | } | 218 | } |
209 | |||
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 1381e06d6af3..bdb46c8cadd1 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c | |||
@@ -150,7 +150,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, | |||
150 | man->default_caching = TTM_PL_FLAG_CACHED; | 150 | man->default_caching = TTM_PL_FLAG_CACHED; |
151 | break; | 151 | break; |
152 | case TTM_PL_TT: | 152 | case TTM_PL_TT: |
153 | man->gpu_offset = 0; | 153 | man->gpu_offset = rdev->mc.gtt_location; |
154 | man->available_caching = TTM_PL_MASK_CACHING; | 154 | man->available_caching = TTM_PL_MASK_CACHING; |
155 | man->default_caching = TTM_PL_FLAG_CACHED; | 155 | man->default_caching = TTM_PL_FLAG_CACHED; |
156 | man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA; | 156 | man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA; |
@@ -180,7 +180,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, | |||
180 | break; | 180 | break; |
181 | case TTM_PL_VRAM: | 181 | case TTM_PL_VRAM: |
182 | /* "On-card" video ram */ | 182 | /* "On-card" video ram */ |
183 | man->gpu_offset = 0; | 183 | man->gpu_offset = rdev->mc.vram_location; |
184 | man->flags = TTM_MEMTYPE_FLAG_FIXED | | 184 | man->flags = TTM_MEMTYPE_FLAG_FIXED | |
185 | TTM_MEMTYPE_FLAG_NEEDS_IOREMAP | | 185 | TTM_MEMTYPE_FLAG_NEEDS_IOREMAP | |
186 | TTM_MEMTYPE_FLAG_MAPPABLE; | 186 | TTM_MEMTYPE_FLAG_MAPPABLE; |
@@ -482,27 +482,31 @@ int radeon_ttm_init(struct radeon_device *rdev) | |||
482 | DRM_ERROR("failed initializing buffer object driver(%d).\n", r); | 482 | DRM_ERROR("failed initializing buffer object driver(%d).\n", r); |
483 | return r; | 483 | return r; |
484 | } | 484 | } |
485 | r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM, 0, | 485 | r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM, |
486 | ((rdev->mc.real_vram_size) >> PAGE_SHIFT)); | 486 | 0, rdev->mc.real_vram_size >> PAGE_SHIFT); |
487 | if (r) { | 487 | if (r) { |
488 | DRM_ERROR("Failed initializing VRAM heap.\n"); | 488 | DRM_ERROR("Failed initializing VRAM heap.\n"); |
489 | return r; | 489 | return r; |
490 | } | 490 | } |
491 | r = radeon_object_create(rdev, NULL, 256 * 1024, true, | 491 | r = radeon_bo_create(rdev, NULL, 256 * 1024, true, |
492 | RADEON_GEM_DOMAIN_VRAM, false, | 492 | RADEON_GEM_DOMAIN_VRAM, |
493 | &rdev->stollen_vga_memory); | 493 | &rdev->stollen_vga_memory); |
494 | if (r) { | 494 | if (r) { |
495 | return r; | 495 | return r; |
496 | } | 496 | } |
497 | r = radeon_object_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL); | 497 | r = radeon_bo_reserve(rdev->stollen_vga_memory, false); |
498 | if (r) | ||
499 | return r; | ||
500 | r = radeon_bo_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL); | ||
501 | radeon_bo_unreserve(rdev->stollen_vga_memory); | ||
498 | if (r) { | 502 | if (r) { |
499 | radeon_object_unref(&rdev->stollen_vga_memory); | 503 | radeon_bo_unref(&rdev->stollen_vga_memory); |
500 | return r; | 504 | return r; |
501 | } | 505 | } |
502 | DRM_INFO("radeon: %uM of VRAM memory ready\n", | 506 | DRM_INFO("radeon: %uM of VRAM memory ready\n", |
503 | (unsigned)rdev->mc.real_vram_size / (1024 * 1024)); | 507 | (unsigned)rdev->mc.real_vram_size / (1024 * 1024)); |
504 | r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT, 0, | 508 | r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT, |
505 | ((rdev->mc.gtt_size) >> PAGE_SHIFT)); | 509 | 0, rdev->mc.gtt_size >> PAGE_SHIFT); |
506 | if (r) { | 510 | if (r) { |
507 | DRM_ERROR("Failed initializing GTT heap.\n"); | 511 | DRM_ERROR("Failed initializing GTT heap.\n"); |
508 | return r; | 512 | return r; |
@@ -523,9 +527,15 @@ int radeon_ttm_init(struct radeon_device *rdev) | |||
523 | 527 | ||
524 | void radeon_ttm_fini(struct radeon_device *rdev) | 528 | void radeon_ttm_fini(struct radeon_device *rdev) |
525 | { | 529 | { |
530 | int r; | ||
531 | |||
526 | if (rdev->stollen_vga_memory) { | 532 | if (rdev->stollen_vga_memory) { |
527 | radeon_object_unpin(rdev->stollen_vga_memory); | 533 | r = radeon_bo_reserve(rdev->stollen_vga_memory, false); |
528 | radeon_object_unref(&rdev->stollen_vga_memory); | 534 | if (r == 0) { |
535 | radeon_bo_unpin(rdev->stollen_vga_memory); | ||
536 | radeon_bo_unreserve(rdev->stollen_vga_memory); | ||
537 | } | ||
538 | radeon_bo_unref(&rdev->stollen_vga_memory); | ||
529 | } | 539 | } |
530 | ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM); | 540 | ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM); |
531 | ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT); | 541 | ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT); |
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index ca037160a582..eda6d757b5c4 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c | |||
@@ -352,7 +352,7 @@ static int rs400_mc_init(struct radeon_device *rdev) | |||
352 | u32 tmp; | 352 | u32 tmp; |
353 | 353 | ||
354 | /* Setup GPU memory space */ | 354 | /* Setup GPU memory space */ |
355 | tmp = G_00015C_MC_FB_START(RREG32(R_00015C_NB_TOM)); | 355 | tmp = RREG32(R_00015C_NB_TOM); |
356 | rdev->mc.vram_location = G_00015C_MC_FB_START(tmp) << 16; | 356 | rdev->mc.vram_location = G_00015C_MC_FB_START(tmp) << 16; |
357 | rdev->mc.gtt_location = 0xFFFFFFFFUL; | 357 | rdev->mc.gtt_location = 0xFFFFFFFFUL; |
358 | r = radeon_mc_setup(rdev); | 358 | r = radeon_mc_setup(rdev); |
@@ -387,13 +387,13 @@ static int rs400_startup(struct radeon_device *rdev) | |||
387 | r300_clock_startup(rdev); | 387 | r300_clock_startup(rdev); |
388 | /* Initialize GPU configuration (# pipes, ...) */ | 388 | /* Initialize GPU configuration (# pipes, ...) */ |
389 | rs400_gpu_init(rdev); | 389 | rs400_gpu_init(rdev); |
390 | r100_enable_bm(rdev); | ||
390 | /* Initialize GART (initialize after TTM so we can allocate | 391 | /* Initialize GART (initialize after TTM so we can allocate |
391 | * memory through TTM but finalize after TTM) */ | 392 | * memory through TTM but finalize after TTM) */ |
392 | r = rs400_gart_enable(rdev); | 393 | r = rs400_gart_enable(rdev); |
393 | if (r) | 394 | if (r) |
394 | return r; | 395 | return r; |
395 | /* Enable IRQ */ | 396 | /* Enable IRQ */ |
396 | rdev->irq.sw_int = true; | ||
397 | r100_irq_set(rdev); | 397 | r100_irq_set(rdev); |
398 | /* 1M ring buffer */ | 398 | /* 1M ring buffer */ |
399 | r = r100_cp_init(rdev, 1024 * 1024); | 399 | r = r100_cp_init(rdev, 1024 * 1024); |
@@ -452,7 +452,7 @@ void rs400_fini(struct radeon_device *rdev) | |||
452 | rs400_gart_fini(rdev); | 452 | rs400_gart_fini(rdev); |
453 | radeon_irq_kms_fini(rdev); | 453 | radeon_irq_kms_fini(rdev); |
454 | radeon_fence_driver_fini(rdev); | 454 | radeon_fence_driver_fini(rdev); |
455 | radeon_object_fini(rdev); | 455 | radeon_bo_fini(rdev); |
456 | radeon_atombios_fini(rdev); | 456 | radeon_atombios_fini(rdev); |
457 | kfree(rdev->bios); | 457 | kfree(rdev->bios); |
458 | rdev->bios = NULL; | 458 | rdev->bios = NULL; |
@@ -490,10 +490,9 @@ int rs400_init(struct radeon_device *rdev) | |||
490 | RREG32(R_0007C0_CP_STAT)); | 490 | RREG32(R_0007C0_CP_STAT)); |
491 | } | 491 | } |
492 | /* check if cards are posted or not */ | 492 | /* check if cards are posted or not */ |
493 | if (!radeon_card_posted(rdev) && rdev->bios) { | 493 | if (radeon_boot_test_post_card(rdev) == false) |
494 | DRM_INFO("GPU not posted. posting now...\n"); | 494 | return -EINVAL; |
495 | radeon_combios_asic_init(rdev->ddev); | 495 | |
496 | } | ||
497 | /* Initialize clocks */ | 496 | /* Initialize clocks */ |
498 | radeon_get_clock_info(rdev->ddev); | 497 | radeon_get_clock_info(rdev->ddev); |
499 | /* Get vram informations */ | 498 | /* Get vram informations */ |
@@ -510,7 +509,7 @@ int rs400_init(struct radeon_device *rdev) | |||
510 | if (r) | 509 | if (r) |
511 | return r; | 510 | return r; |
512 | /* Memory manager */ | 511 | /* Memory manager */ |
513 | r = radeon_object_init(rdev); | 512 | r = radeon_bo_init(rdev); |
514 | if (r) | 513 | if (r) |
515 | return r; | 514 | return r; |
516 | r = rs400_gart_init(rdev); | 515 | r = rs400_gart_init(rdev); |
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 5f117cd8736a..fd5ab01f6ad1 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c | |||
@@ -45,6 +45,122 @@ | |||
45 | void rs600_gpu_init(struct radeon_device *rdev); | 45 | void rs600_gpu_init(struct radeon_device *rdev); |
46 | int rs600_mc_wait_for_idle(struct radeon_device *rdev); | 46 | int rs600_mc_wait_for_idle(struct radeon_device *rdev); |
47 | 47 | ||
48 | int rs600_mc_init(struct radeon_device *rdev) | ||
49 | { | ||
50 | /* read back the MC value from the hw */ | ||
51 | int r; | ||
52 | u32 tmp; | ||
53 | |||
54 | /* Setup GPU memory space */ | ||
55 | tmp = RREG32_MC(R_000004_MC_FB_LOCATION); | ||
56 | rdev->mc.vram_location = G_000004_MC_FB_START(tmp) << 16; | ||
57 | rdev->mc.gtt_location = 0xffffffffUL; | ||
58 | r = radeon_mc_setup(rdev); | ||
59 | if (r) | ||
60 | return r; | ||
61 | return 0; | ||
62 | } | ||
63 | |||
64 | /* hpd for digital panel detect/disconnect */ | ||
65 | bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd) | ||
66 | { | ||
67 | u32 tmp; | ||
68 | bool connected = false; | ||
69 | |||
70 | switch (hpd) { | ||
71 | case RADEON_HPD_1: | ||
72 | tmp = RREG32(R_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS); | ||
73 | if (G_007D04_DC_HOT_PLUG_DETECT1_SENSE(tmp)) | ||
74 | connected = true; | ||
75 | break; | ||
76 | case RADEON_HPD_2: | ||
77 | tmp = RREG32(R_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS); | ||
78 | if (G_007D14_DC_HOT_PLUG_DETECT2_SENSE(tmp)) | ||
79 | connected = true; | ||
80 | break; | ||
81 | default: | ||
82 | break; | ||
83 | } | ||
84 | return connected; | ||
85 | } | ||
86 | |||
87 | void rs600_hpd_set_polarity(struct radeon_device *rdev, | ||
88 | enum radeon_hpd_id hpd) | ||
89 | { | ||
90 | u32 tmp; | ||
91 | bool connected = rs600_hpd_sense(rdev, hpd); | ||
92 | |||
93 | switch (hpd) { | ||
94 | case RADEON_HPD_1: | ||
95 | tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL); | ||
96 | if (connected) | ||
97 | tmp &= ~S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(1); | ||
98 | else | ||
99 | tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(1); | ||
100 | WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp); | ||
101 | break; | ||
102 | case RADEON_HPD_2: | ||
103 | tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL); | ||
104 | if (connected) | ||
105 | tmp &= ~S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(1); | ||
106 | else | ||
107 | tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(1); | ||
108 | WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp); | ||
109 | break; | ||
110 | default: | ||
111 | break; | ||
112 | } | ||
113 | } | ||
114 | |||
115 | void rs600_hpd_init(struct radeon_device *rdev) | ||
116 | { | ||
117 | struct drm_device *dev = rdev->ddev; | ||
118 | struct drm_connector *connector; | ||
119 | |||
120 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
121 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | ||
122 | switch (radeon_connector->hpd.hpd) { | ||
123 | case RADEON_HPD_1: | ||
124 | WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL, | ||
125 | S_007D00_DC_HOT_PLUG_DETECT1_EN(1)); | ||
126 | rdev->irq.hpd[0] = true; | ||
127 | break; | ||
128 | case RADEON_HPD_2: | ||
129 | WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL, | ||
130 | S_007D10_DC_HOT_PLUG_DETECT2_EN(1)); | ||
131 | rdev->irq.hpd[1] = true; | ||
132 | break; | ||
133 | default: | ||
134 | break; | ||
135 | } | ||
136 | } | ||
137 | rs600_irq_set(rdev); | ||
138 | } | ||
139 | |||
140 | void rs600_hpd_fini(struct radeon_device *rdev) | ||
141 | { | ||
142 | struct drm_device *dev = rdev->ddev; | ||
143 | struct drm_connector *connector; | ||
144 | |||
145 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
146 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); | ||
147 | switch (radeon_connector->hpd.hpd) { | ||
148 | case RADEON_HPD_1: | ||
149 | WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL, | ||
150 | S_007D00_DC_HOT_PLUG_DETECT1_EN(0)); | ||
151 | rdev->irq.hpd[0] = false; | ||
152 | break; | ||
153 | case RADEON_HPD_2: | ||
154 | WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL, | ||
155 | S_007D10_DC_HOT_PLUG_DETECT2_EN(0)); | ||
156 | rdev->irq.hpd[1] = false; | ||
157 | break; | ||
158 | default: | ||
159 | break; | ||
160 | } | ||
161 | } | ||
162 | } | ||
163 | |||
48 | /* | 164 | /* |
49 | * GART. | 165 | * GART. |
50 | */ | 166 | */ |
@@ -100,40 +216,40 @@ int rs600_gart_enable(struct radeon_device *rdev) | |||
100 | WREG32(R_00004C_BUS_CNTL, tmp); | 216 | WREG32(R_00004C_BUS_CNTL, tmp); |
101 | /* FIXME: setup default page */ | 217 | /* FIXME: setup default page */ |
102 | WREG32_MC(R_000100_MC_PT0_CNTL, | 218 | WREG32_MC(R_000100_MC_PT0_CNTL, |
103 | (S_000100_EFFECTIVE_L2_CACHE_SIZE(6) | | 219 | (S_000100_EFFECTIVE_L2_CACHE_SIZE(6) | |
104 | S_000100_EFFECTIVE_L2_QUEUE_SIZE(6))); | 220 | S_000100_EFFECTIVE_L2_QUEUE_SIZE(6))); |
221 | |||
105 | for (i = 0; i < 19; i++) { | 222 | for (i = 0; i < 19; i++) { |
106 | WREG32_MC(R_00016C_MC_PT0_CLIENT0_CNTL + i, | 223 | WREG32_MC(R_00016C_MC_PT0_CLIENT0_CNTL + i, |
107 | S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(1) | | 224 | S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(1) | |
108 | S_00016C_SYSTEM_ACCESS_MODE_MASK( | 225 | S_00016C_SYSTEM_ACCESS_MODE_MASK( |
109 | V_00016C_SYSTEM_ACCESS_MODE_IN_SYS) | | 226 | V_00016C_SYSTEM_ACCESS_MODE_NOT_IN_SYS) | |
110 | S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS( | 227 | S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS( |
111 | V_00016C_SYSTEM_APERTURE_UNMAPPED_DEFAULT_PAGE) | | 228 | V_00016C_SYSTEM_APERTURE_UNMAPPED_PASSTHROUGH) | |
112 | S_00016C_EFFECTIVE_L1_CACHE_SIZE(1) | | 229 | S_00016C_EFFECTIVE_L1_CACHE_SIZE(3) | |
113 | S_00016C_ENABLE_FRAGMENT_PROCESSING(1) | | 230 | S_00016C_ENABLE_FRAGMENT_PROCESSING(1) | |
114 | S_00016C_EFFECTIVE_L1_QUEUE_SIZE(1)); | 231 | S_00016C_EFFECTIVE_L1_QUEUE_SIZE(3)); |
115 | } | 232 | } |
116 | |||
117 | /* System context map to GART space */ | ||
118 | WREG32_MC(R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.gtt_start); | ||
119 | WREG32_MC(R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.gtt_end); | ||
120 | |||
121 | /* enable first context */ | 233 | /* enable first context */ |
122 | WREG32_MC(R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_start); | ||
123 | WREG32_MC(R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR, rdev->mc.gtt_end); | ||
124 | WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL, | 234 | WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL, |
125 | S_000102_ENABLE_PAGE_TABLE(1) | | 235 | S_000102_ENABLE_PAGE_TABLE(1) | |
126 | S_000102_PAGE_TABLE_DEPTH(V_000102_PAGE_TABLE_FLAT)); | 236 | S_000102_PAGE_TABLE_DEPTH(V_000102_PAGE_TABLE_FLAT)); |
237 | |||
127 | /* disable all other contexts */ | 238 | /* disable all other contexts */ |
128 | for (i = 1; i < 8; i++) { | 239 | for (i = 1; i < 8; i++) |
129 | WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL + i, 0); | 240 | WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL + i, 0); |
130 | } | ||
131 | 241 | ||
132 | /* setup the page table */ | 242 | /* setup the page table */ |
133 | WREG32_MC(R_00012C_MC_PT0_CONTEXT0_FLAT_BASE_ADDR, | 243 | WREG32_MC(R_00012C_MC_PT0_CONTEXT0_FLAT_BASE_ADDR, |
134 | rdev->gart.table_addr); | 244 | rdev->gart.table_addr); |
245 | WREG32_MC(R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_start); | ||
246 | WREG32_MC(R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR, rdev->mc.gtt_end); | ||
135 | WREG32_MC(R_00011C_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0); | 247 | WREG32_MC(R_00011C_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0); |
136 | 248 | ||
249 | /* System context maps to VRAM space */ | ||
250 | WREG32_MC(R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start); | ||
251 | WREG32_MC(R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end); | ||
252 | |||
137 | /* enable page tables */ | 253 | /* enable page tables */ |
138 | tmp = RREG32_MC(R_000100_MC_PT0_CNTL); | 254 | tmp = RREG32_MC(R_000100_MC_PT0_CNTL); |
139 | WREG32_MC(R_000100_MC_PT0_CNTL, (tmp | S_000100_ENABLE_PT(1))); | 255 | WREG32_MC(R_000100_MC_PT0_CNTL, (tmp | S_000100_ENABLE_PT(1))); |
@@ -146,15 +262,20 @@ int rs600_gart_enable(struct radeon_device *rdev) | |||
146 | 262 | ||
147 | void rs600_gart_disable(struct radeon_device *rdev) | 263 | void rs600_gart_disable(struct radeon_device *rdev) |
148 | { | 264 | { |
149 | uint32_t tmp; | 265 | u32 tmp; |
266 | int r; | ||
150 | 267 | ||
151 | /* FIXME: disable out of gart access */ | 268 | /* FIXME: disable out of gart access */ |
152 | WREG32_MC(R_000100_MC_PT0_CNTL, 0); | 269 | WREG32_MC(R_000100_MC_PT0_CNTL, 0); |
153 | tmp = RREG32_MC(R_000009_MC_CNTL1); | 270 | tmp = RREG32_MC(R_000009_MC_CNTL1); |
154 | WREG32_MC(R_000009_MC_CNTL1, tmp & C_000009_ENABLE_PAGE_TABLES); | 271 | WREG32_MC(R_000009_MC_CNTL1, tmp & C_000009_ENABLE_PAGE_TABLES); |
155 | if (rdev->gart.table.vram.robj) { | 272 | if (rdev->gart.table.vram.robj) { |
156 | radeon_object_kunmap(rdev->gart.table.vram.robj); | 273 | r = radeon_bo_reserve(rdev->gart.table.vram.robj, false); |
157 | radeon_object_unpin(rdev->gart.table.vram.robj); | 274 | if (r == 0) { |
275 | radeon_bo_kunmap(rdev->gart.table.vram.robj); | ||
276 | radeon_bo_unpin(rdev->gart.table.vram.robj); | ||
277 | radeon_bo_unreserve(rdev->gart.table.vram.robj); | ||
278 | } | ||
158 | } | 279 | } |
159 | } | 280 | } |
160 | 281 | ||
@@ -189,6 +310,10 @@ int rs600_irq_set(struct radeon_device *rdev) | |||
189 | { | 310 | { |
190 | uint32_t tmp = 0; | 311 | uint32_t tmp = 0; |
191 | uint32_t mode_int = 0; | 312 | uint32_t mode_int = 0; |
313 | u32 hpd1 = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL) & | ||
314 | ~S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1); | ||
315 | u32 hpd2 = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL) & | ||
316 | ~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1); | ||
192 | 317 | ||
193 | if (rdev->irq.sw_int) { | 318 | if (rdev->irq.sw_int) { |
194 | tmp |= S_000040_SW_INT_EN(1); | 319 | tmp |= S_000040_SW_INT_EN(1); |
@@ -199,8 +324,16 @@ int rs600_irq_set(struct radeon_device *rdev) | |||
199 | if (rdev->irq.crtc_vblank_int[1]) { | 324 | if (rdev->irq.crtc_vblank_int[1]) { |
200 | mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1); | 325 | mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1); |
201 | } | 326 | } |
327 | if (rdev->irq.hpd[0]) { | ||
328 | hpd1 |= S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1); | ||
329 | } | ||
330 | if (rdev->irq.hpd[1]) { | ||
331 | hpd2 |= S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1); | ||
332 | } | ||
202 | WREG32(R_000040_GEN_INT_CNTL, tmp); | 333 | WREG32(R_000040_GEN_INT_CNTL, tmp); |
203 | WREG32(R_006540_DxMODE_INT_MASK, mode_int); | 334 | WREG32(R_006540_DxMODE_INT_MASK, mode_int); |
335 | WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1); | ||
336 | WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2); | ||
204 | return 0; | 337 | return 0; |
205 | } | 338 | } |
206 | 339 | ||
@@ -208,6 +341,7 @@ static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_ | |||
208 | { | 341 | { |
209 | uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS); | 342 | uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS); |
210 | uint32_t irq_mask = ~C_000044_SW_INT; | 343 | uint32_t irq_mask = ~C_000044_SW_INT; |
344 | u32 tmp; | ||
211 | 345 | ||
212 | if (G_000044_DISPLAY_INT_STAT(irqs)) { | 346 | if (G_000044_DISPLAY_INT_STAT(irqs)) { |
213 | *r500_disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS); | 347 | *r500_disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS); |
@@ -219,6 +353,16 @@ static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_ | |||
219 | WREG32(R_006D34_D2MODE_VBLANK_STATUS, | 353 | WREG32(R_006D34_D2MODE_VBLANK_STATUS, |
220 | S_006D34_D2MODE_VBLANK_ACK(1)); | 354 | S_006D34_D2MODE_VBLANK_ACK(1)); |
221 | } | 355 | } |
356 | if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(*r500_disp_int)) { | ||
357 | tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL); | ||
358 | tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(1); | ||
359 | WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp); | ||
360 | } | ||
361 | if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(*r500_disp_int)) { | ||
362 | tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL); | ||
363 | tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(1); | ||
364 | WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp); | ||
365 | } | ||
222 | } else { | 366 | } else { |
223 | *r500_disp_int = 0; | 367 | *r500_disp_int = 0; |
224 | } | 368 | } |
@@ -244,6 +388,7 @@ int rs600_irq_process(struct radeon_device *rdev) | |||
244 | { | 388 | { |
245 | uint32_t status, msi_rearm; | 389 | uint32_t status, msi_rearm; |
246 | uint32_t r500_disp_int; | 390 | uint32_t r500_disp_int; |
391 | bool queue_hotplug = false; | ||
247 | 392 | ||
248 | status = rs600_irq_ack(rdev, &r500_disp_int); | 393 | status = rs600_irq_ack(rdev, &r500_disp_int); |
249 | if (!status && !r500_disp_int) { | 394 | if (!status && !r500_disp_int) { |
@@ -258,8 +403,18 @@ int rs600_irq_process(struct radeon_device *rdev) | |||
258 | drm_handle_vblank(rdev->ddev, 0); | 403 | drm_handle_vblank(rdev->ddev, 0); |
259 | if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int)) | 404 | if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int)) |
260 | drm_handle_vblank(rdev->ddev, 1); | 405 | drm_handle_vblank(rdev->ddev, 1); |
406 | if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(r500_disp_int)) { | ||
407 | queue_hotplug = true; | ||
408 | DRM_DEBUG("HPD1\n"); | ||
409 | } | ||
410 | if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(r500_disp_int)) { | ||
411 | queue_hotplug = true; | ||
412 | DRM_DEBUG("HPD2\n"); | ||
413 | } | ||
261 | status = rs600_irq_ack(rdev, &r500_disp_int); | 414 | status = rs600_irq_ack(rdev, &r500_disp_int); |
262 | } | 415 | } |
416 | if (queue_hotplug) | ||
417 | queue_work(rdev->wq, &rdev->hotplug_work); | ||
263 | if (rdev->msi_enabled) { | 418 | if (rdev->msi_enabled) { |
264 | switch (rdev->family) { | 419 | switch (rdev->family) { |
265 | case CHIP_RS600: | 420 | case CHIP_RS600: |
@@ -301,9 +456,7 @@ int rs600_mc_wait_for_idle(struct radeon_device *rdev) | |||
301 | 456 | ||
302 | void rs600_gpu_init(struct radeon_device *rdev) | 457 | void rs600_gpu_init(struct radeon_device *rdev) |
303 | { | 458 | { |
304 | /* FIXME: HDP same place on rs600 ? */ | ||
305 | r100_hdp_reset(rdev); | 459 | r100_hdp_reset(rdev); |
306 | /* FIXME: is this correct ? */ | ||
307 | r420_pipes_init(rdev); | 460 | r420_pipes_init(rdev); |
308 | /* Wait for mc idle */ | 461 | /* Wait for mc idle */ |
309 | if (rs600_mc_wait_for_idle(rdev)) | 462 | if (rs600_mc_wait_for_idle(rdev)) |
@@ -312,9 +465,20 @@ void rs600_gpu_init(struct radeon_device *rdev) | |||
312 | 465 | ||
313 | void rs600_vram_info(struct radeon_device *rdev) | 466 | void rs600_vram_info(struct radeon_device *rdev) |
314 | { | 467 | { |
315 | /* FIXME: to do or is these values sane ? */ | ||
316 | rdev->mc.vram_is_ddr = true; | 468 | rdev->mc.vram_is_ddr = true; |
317 | rdev->mc.vram_width = 128; | 469 | rdev->mc.vram_width = 128; |
470 | |||
471 | rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); | ||
472 | rdev->mc.mc_vram_size = rdev->mc.real_vram_size; | ||
473 | |||
474 | rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); | ||
475 | rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); | ||
476 | |||
477 | if (rdev->mc.mc_vram_size > rdev->mc.aper_size) | ||
478 | rdev->mc.mc_vram_size = rdev->mc.aper_size; | ||
479 | |||
480 | if (rdev->mc.real_vram_size > rdev->mc.aper_size) | ||
481 | rdev->mc.real_vram_size = rdev->mc.aper_size; | ||
318 | } | 482 | } |
319 | 483 | ||
320 | void rs600_bandwidth_update(struct radeon_device *rdev) | 484 | void rs600_bandwidth_update(struct radeon_device *rdev) |
@@ -388,7 +552,6 @@ static int rs600_startup(struct radeon_device *rdev) | |||
388 | if (r) | 552 | if (r) |
389 | return r; | 553 | return r; |
390 | /* Enable IRQ */ | 554 | /* Enable IRQ */ |
391 | rdev->irq.sw_int = true; | ||
392 | rs600_irq_set(rdev); | 555 | rs600_irq_set(rdev); |
393 | /* 1M ring buffer */ | 556 | /* 1M ring buffer */ |
394 | r = r100_cp_init(rdev, 1024 * 1024); | 557 | r = r100_cp_init(rdev, 1024 * 1024); |
@@ -445,7 +608,7 @@ void rs600_fini(struct radeon_device *rdev) | |||
445 | rs600_gart_fini(rdev); | 608 | rs600_gart_fini(rdev); |
446 | radeon_irq_kms_fini(rdev); | 609 | radeon_irq_kms_fini(rdev); |
447 | radeon_fence_driver_fini(rdev); | 610 | radeon_fence_driver_fini(rdev); |
448 | radeon_object_fini(rdev); | 611 | radeon_bo_fini(rdev); |
449 | radeon_atombios_fini(rdev); | 612 | radeon_atombios_fini(rdev); |
450 | kfree(rdev->bios); | 613 | kfree(rdev->bios); |
451 | rdev->bios = NULL; | 614 | rdev->bios = NULL; |
@@ -482,10 +645,9 @@ int rs600_init(struct radeon_device *rdev) | |||
482 | RREG32(R_0007C0_CP_STAT)); | 645 | RREG32(R_0007C0_CP_STAT)); |
483 | } | 646 | } |
484 | /* check if cards are posted or not */ | 647 | /* check if cards are posted or not */ |
485 | if (!radeon_card_posted(rdev) && rdev->bios) { | 648 | if (radeon_boot_test_post_card(rdev) == false) |
486 | DRM_INFO("GPU not posted. posting now...\n"); | 649 | return -EINVAL; |
487 | atom_asic_init(rdev->mode_info.atom_context); | 650 | |
488 | } | ||
489 | /* Initialize clocks */ | 651 | /* Initialize clocks */ |
490 | radeon_get_clock_info(rdev->ddev); | 652 | radeon_get_clock_info(rdev->ddev); |
491 | /* Initialize power management */ | 653 | /* Initialize power management */ |
@@ -493,7 +655,7 @@ int rs600_init(struct radeon_device *rdev) | |||
493 | /* Get vram informations */ | 655 | /* Get vram informations */ |
494 | rs600_vram_info(rdev); | 656 | rs600_vram_info(rdev); |
495 | /* Initialize memory controller (also test AGP) */ | 657 | /* Initialize memory controller (also test AGP) */ |
496 | r = r420_mc_init(rdev); | 658 | r = rs600_mc_init(rdev); |
497 | if (r) | 659 | if (r) |
498 | return r; | 660 | return r; |
499 | rs600_debugfs(rdev); | 661 | rs600_debugfs(rdev); |
@@ -505,7 +667,7 @@ int rs600_init(struct radeon_device *rdev) | |||
505 | if (r) | 667 | if (r) |
506 | return r; | 668 | return r; |
507 | /* Memory manager */ | 669 | /* Memory manager */ |
508 | r = radeon_object_init(rdev); | 670 | r = radeon_bo_init(rdev); |
509 | if (r) | 671 | if (r) |
510 | return r; | 672 | return r; |
511 | r = rs600_gart_init(rdev); | 673 | r = rs600_gart_init(rdev); |
diff --git a/drivers/gpu/drm/radeon/rs600d.h b/drivers/gpu/drm/radeon/rs600d.h index 81308924859a..c1c8f5885cbb 100644 --- a/drivers/gpu/drm/radeon/rs600d.h +++ b/drivers/gpu/drm/radeon/rs600d.h | |||
@@ -30,27 +30,12 @@ | |||
30 | 30 | ||
31 | /* Registers */ | 31 | /* Registers */ |
32 | #define R_000040_GEN_INT_CNTL 0x000040 | 32 | #define R_000040_GEN_INT_CNTL 0x000040 |
33 | #define S_000040_DISPLAY_INT_STATUS(x) (((x) & 0x1) << 0) | 33 | #define S_000040_SCRATCH_INT_MASK(x) (((x) & 0x1) << 18) |
34 | #define G_000040_DISPLAY_INT_STATUS(x) (((x) >> 0) & 0x1) | 34 | #define G_000040_SCRATCH_INT_MASK(x) (((x) >> 18) & 0x1) |
35 | #define C_000040_DISPLAY_INT_STATUS 0xFFFFFFFE | 35 | #define C_000040_SCRATCH_INT_MASK 0xFFFBFFFF |
36 | #define S_000040_DMA_VIPH0_INT_EN(x) (((x) & 0x1) << 12) | 36 | #define S_000040_GUI_IDLE_MASK(x) (((x) & 0x1) << 19) |
37 | #define G_000040_DMA_VIPH0_INT_EN(x) (((x) >> 12) & 0x1) | 37 | #define G_000040_GUI_IDLE_MASK(x) (((x) >> 19) & 0x1) |
38 | #define C_000040_DMA_VIPH0_INT_EN 0xFFFFEFFF | 38 | #define C_000040_GUI_IDLE_MASK 0xFFF7FFFF |
39 | #define S_000040_CRTC2_VSYNC(x) (((x) & 0x1) << 6) | ||
40 | #define G_000040_CRTC2_VSYNC(x) (((x) >> 6) & 0x1) | ||
41 | #define C_000040_CRTC2_VSYNC 0xFFFFFFBF | ||
42 | #define S_000040_SNAPSHOT2(x) (((x) & 0x1) << 7) | ||
43 | #define G_000040_SNAPSHOT2(x) (((x) >> 7) & 0x1) | ||
44 | #define C_000040_SNAPSHOT2 0xFFFFFF7F | ||
45 | #define S_000040_CRTC2_VBLANK(x) (((x) & 0x1) << 9) | ||
46 | #define G_000040_CRTC2_VBLANK(x) (((x) >> 9) & 0x1) | ||
47 | #define C_000040_CRTC2_VBLANK 0xFFFFFDFF | ||
48 | #define S_000040_FP2_DETECT(x) (((x) & 0x1) << 10) | ||
49 | #define G_000040_FP2_DETECT(x) (((x) >> 10) & 0x1) | ||
50 | #define C_000040_FP2_DETECT 0xFFFFFBFF | ||
51 | #define S_000040_VSYNC_DIFF_OVER_LIMIT(x) (((x) & 0x1) << 11) | ||
52 | #define G_000040_VSYNC_DIFF_OVER_LIMIT(x) (((x) >> 11) & 0x1) | ||
53 | #define C_000040_VSYNC_DIFF_OVER_LIMIT 0xFFFFF7FF | ||
54 | #define S_000040_DMA_VIPH1_INT_EN(x) (((x) & 0x1) << 13) | 39 | #define S_000040_DMA_VIPH1_INT_EN(x) (((x) & 0x1) << 13) |
55 | #define G_000040_DMA_VIPH1_INT_EN(x) (((x) >> 13) & 0x1) | 40 | #define G_000040_DMA_VIPH1_INT_EN(x) (((x) >> 13) & 0x1) |
56 | #define C_000040_DMA_VIPH1_INT_EN 0xFFFFDFFF | 41 | #define C_000040_DMA_VIPH1_INT_EN 0xFFFFDFFF |
@@ -370,7 +355,90 @@ | |||
370 | #define S_007EDC_LB_D2_VBLANK_INTERRUPT(x) (((x) & 0x1) << 5) | 355 | #define S_007EDC_LB_D2_VBLANK_INTERRUPT(x) (((x) & 0x1) << 5) |
371 | #define G_007EDC_LB_D2_VBLANK_INTERRUPT(x) (((x) >> 5) & 0x1) | 356 | #define G_007EDC_LB_D2_VBLANK_INTERRUPT(x) (((x) >> 5) & 0x1) |
372 | #define C_007EDC_LB_D2_VBLANK_INTERRUPT 0xFFFFFFDF | 357 | #define C_007EDC_LB_D2_VBLANK_INTERRUPT 0xFFFFFFDF |
373 | 358 | #define S_007EDC_DACA_AUTODETECT_INTERRUPT(x) (((x) & 0x1) << 16) | |
359 | #define G_007EDC_DACA_AUTODETECT_INTERRUPT(x) (((x) >> 16) & 0x1) | ||
360 | #define C_007EDC_DACA_AUTODETECT_INTERRUPT 0xFFFEFFFF | ||
361 | #define S_007EDC_DACB_AUTODETECT_INTERRUPT(x) (((x) & 0x1) << 17) | ||
362 | #define G_007EDC_DACB_AUTODETECT_INTERRUPT(x) (((x) >> 17) & 0x1) | ||
363 | #define C_007EDC_DACB_AUTODETECT_INTERRUPT 0xFFFDFFFF | ||
364 | #define S_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(x) (((x) & 0x1) << 18) | ||
365 | #define G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(x) (((x) >> 18) & 0x1) | ||
366 | #define C_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT 0xFFFBFFFF | ||
367 | #define S_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(x) (((x) & 0x1) << 19) | ||
368 | #define G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(x) (((x) >> 19) & 0x1) | ||
369 | #define C_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT 0xFFF7FFFF | ||
370 | #define R_007828_DACA_AUTODETECT_CONTROL 0x007828 | ||
371 | #define S_007828_DACA_AUTODETECT_MODE(x) (((x) & 0x3) << 0) | ||
372 | #define G_007828_DACA_AUTODETECT_MODE(x) (((x) >> 0) & 0x3) | ||
373 | #define C_007828_DACA_AUTODETECT_MODE 0xFFFFFFFC | ||
374 | #define S_007828_DACA_AUTODETECT_FRAME_TIME_COUNTER(x) (((x) & 0xff) << 8) | ||
375 | #define G_007828_DACA_AUTODETECT_FRAME_TIME_COUNTER(x) (((x) >> 8) & 0xff) | ||
376 | #define C_007828_DACA_AUTODETECT_FRAME_TIME_COUNTER 0xFFFF00FF | ||
377 | #define S_007828_DACA_AUTODETECT_CHECK_MASK(x) (((x) & 0x3) << 16) | ||
378 | #define G_007828_DACA_AUTODETECT_CHECK_MASK(x) (((x) >> 16) & 0x3) | ||
379 | #define C_007828_DACA_AUTODETECT_CHECK_MASK 0xFFFCFFFF | ||
380 | #define R_007838_DACA_AUTODETECT_INT_CONTROL 0x007838 | ||
381 | #define S_007838_DACA_AUTODETECT_ACK(x) (((x) & 0x1) << 0) | ||
382 | #define C_007838_DACA_DACA_AUTODETECT_ACK 0xFFFFFFFE | ||
383 | #define S_007838_DACA_AUTODETECT_INT_ENABLE(x) (((x) & 0x1) << 16) | ||
384 | #define G_007838_DACA_AUTODETECT_INT_ENABLE(x) (((x) >> 16) & 0x1) | ||
385 | #define C_007838_DACA_AUTODETECT_INT_ENABLE 0xFFFCFFFF | ||
386 | #define R_007A28_DACB_AUTODETECT_CONTROL 0x007A28 | ||
387 | #define S_007A28_DACB_AUTODETECT_MODE(x) (((x) & 0x3) << 0) | ||
388 | #define G_007A28_DACB_AUTODETECT_MODE(x) (((x) >> 0) & 0x3) | ||
389 | #define C_007A28_DACB_AUTODETECT_MODE 0xFFFFFFFC | ||
390 | #define S_007A28_DACB_AUTODETECT_FRAME_TIME_COUNTER(x) (((x) & 0xff) << 8) | ||
391 | #define G_007A28_DACB_AUTODETECT_FRAME_TIME_COUNTER(x) (((x) >> 8) & 0xff) | ||
392 | #define C_007A28_DACB_AUTODETECT_FRAME_TIME_COUNTER 0xFFFF00FF | ||
393 | #define S_007A28_DACB_AUTODETECT_CHECK_MASK(x) (((x) & 0x3) << 16) | ||
394 | #define G_007A28_DACB_AUTODETECT_CHECK_MASK(x) (((x) >> 16) & 0x3) | ||
395 | #define C_007A28_DACB_AUTODETECT_CHECK_MASK 0xFFFCFFFF | ||
396 | #define R_007A38_DACB_AUTODETECT_INT_CONTROL 0x007A38 | ||
397 | #define S_007A38_DACB_AUTODETECT_ACK(x) (((x) & 0x1) << 0) | ||
398 | #define C_007A38_DACB_DACA_AUTODETECT_ACK 0xFFFFFFFE | ||
399 | #define S_007A38_DACB_AUTODETECT_INT_ENABLE(x) (((x) & 0x1) << 16) | ||
400 | #define G_007A38_DACB_AUTODETECT_INT_ENABLE(x) (((x) >> 16) & 0x1) | ||
401 | #define C_007A38_DACB_AUTODETECT_INT_ENABLE 0xFFFCFFFF | ||
402 | #define R_007D00_DC_HOT_PLUG_DETECT1_CONTROL 0x007D00 | ||
403 | #define S_007D00_DC_HOT_PLUG_DETECT1_EN(x) (((x) & 0x1) << 0) | ||
404 | #define G_007D00_DC_HOT_PLUG_DETECT1_EN(x) (((x) >> 0) & 0x1) | ||
405 | #define C_007D00_DC_HOT_PLUG_DETECT1_EN 0xFFFFFFFE | ||
406 | #define R_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS 0x007D04 | ||
407 | #define S_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS(x) (((x) & 0x1) << 0) | ||
408 | #define G_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS(x) (((x) >> 0) & 0x1) | ||
409 | #define C_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS 0xFFFFFFFE | ||
410 | #define S_007D04_DC_HOT_PLUG_DETECT1_SENSE(x) (((x) & 0x1) << 1) | ||
411 | #define G_007D04_DC_HOT_PLUG_DETECT1_SENSE(x) (((x) >> 1) & 0x1) | ||
412 | #define C_007D04_DC_HOT_PLUG_DETECT1_SENSE 0xFFFFFFFD | ||
413 | #define R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL 0x007D08 | ||
414 | #define S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(x) (((x) & 0x1) << 0) | ||
415 | #define C_007D08_DC_HOT_PLUG_DETECT1_INT_ACK 0xFFFFFFFE | ||
416 | #define S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(x) (((x) & 0x1) << 8) | ||
417 | #define G_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(x) (((x) >> 8) & 0x1) | ||
418 | #define C_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY 0xFFFFFEFF | ||
419 | #define S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(x) (((x) & 0x1) << 16) | ||
420 | #define G_007D08_DC_HOT_PLUG_DETECT1_INT_EN(x) (((x) >> 16) & 0x1) | ||
421 | #define C_007D08_DC_HOT_PLUG_DETECT1_INT_EN 0xFFFEFFFF | ||
422 | #define R_007D10_DC_HOT_PLUG_DETECT2_CONTROL 0x007D10 | ||
423 | #define S_007D10_DC_HOT_PLUG_DETECT2_EN(x) (((x) & 0x1) << 0) | ||
424 | #define G_007D10_DC_HOT_PLUG_DETECT2_EN(x) (((x) >> 0) & 0x1) | ||
425 | #define C_007D10_DC_HOT_PLUG_DETECT2_EN 0xFFFFFFFE | ||
426 | #define R_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS 0x007D14 | ||
427 | #define S_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS(x) (((x) & 0x1) << 0) | ||
428 | #define G_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS(x) (((x) >> 0) & 0x1) | ||
429 | #define C_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS 0xFFFFFFFE | ||
430 | #define S_007D14_DC_HOT_PLUG_DETECT2_SENSE(x) (((x) & 0x1) << 1) | ||
431 | #define G_007D14_DC_HOT_PLUG_DETECT2_SENSE(x) (((x) >> 1) & 0x1) | ||
432 | #define C_007D14_DC_HOT_PLUG_DETECT2_SENSE 0xFFFFFFFD | ||
433 | #define R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL 0x007D18 | ||
434 | #define S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(x) (((x) & 0x1) << 0) | ||
435 | #define C_007D18_DC_HOT_PLUG_DETECT2_INT_ACK 0xFFFFFFFE | ||
436 | #define S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(x) (((x) & 0x1) << 8) | ||
437 | #define G_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(x) (((x) >> 8) & 0x1) | ||
438 | #define C_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY 0xFFFFFEFF | ||
439 | #define S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(x) (((x) & 0x1) << 16) | ||
440 | #define G_007D18_DC_HOT_PLUG_DETECT2_INT_EN(x) (((x) >> 16) & 0x1) | ||
441 | #define C_007D18_DC_HOT_PLUG_DETECT2_INT_EN 0xFFFEFFFF | ||
374 | 442 | ||
375 | /* MC registers */ | 443 | /* MC registers */ |
376 | #define R_000000_MC_STATUS 0x000000 | 444 | #define R_000000_MC_STATUS 0x000000 |
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index 27547175cf93..eb486ee7ea00 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c | |||
@@ -131,24 +131,25 @@ void rs690_pm_info(struct radeon_device *rdev) | |||
131 | 131 | ||
132 | void rs690_vram_info(struct radeon_device *rdev) | 132 | void rs690_vram_info(struct radeon_device *rdev) |
133 | { | 133 | { |
134 | uint32_t tmp; | ||
135 | fixed20_12 a; | 134 | fixed20_12 a; |
136 | 135 | ||
137 | rs400_gart_adjust_size(rdev); | 136 | rs400_gart_adjust_size(rdev); |
138 | /* DDR for all card after R300 & IGP */ | 137 | |
139 | rdev->mc.vram_is_ddr = true; | 138 | rdev->mc.vram_is_ddr = true; |
140 | /* FIXME: is this correct for RS690/RS740 ? */ | 139 | rdev->mc.vram_width = 128; |
141 | tmp = RREG32(RADEON_MEM_CNTL); | 140 | |
142 | if (tmp & R300_MEM_NUM_CHANNELS_MASK) { | ||
143 | rdev->mc.vram_width = 128; | ||
144 | } else { | ||
145 | rdev->mc.vram_width = 64; | ||
146 | } | ||
147 | rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); | 141 | rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); |
148 | rdev->mc.mc_vram_size = rdev->mc.real_vram_size; | 142 | rdev->mc.mc_vram_size = rdev->mc.real_vram_size; |
149 | 143 | ||
150 | rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); | 144 | rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); |
151 | rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); | 145 | rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); |
146 | |||
147 | if (rdev->mc.mc_vram_size > rdev->mc.aper_size) | ||
148 | rdev->mc.mc_vram_size = rdev->mc.aper_size; | ||
149 | |||
150 | if (rdev->mc.real_vram_size > rdev->mc.aper_size) | ||
151 | rdev->mc.real_vram_size = rdev->mc.aper_size; | ||
152 | |||
152 | rs690_pm_info(rdev); | 153 | rs690_pm_info(rdev); |
153 | /* FIXME: we should enforce default clock in case GPU is not in | 154 | /* FIXME: we should enforce default clock in case GPU is not in |
154 | * default setup | 155 | * default setup |
@@ -161,6 +162,21 @@ void rs690_vram_info(struct radeon_device *rdev) | |||
161 | rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a); | 162 | rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a); |
162 | } | 163 | } |
163 | 164 | ||
165 | static int rs690_mc_init(struct radeon_device *rdev) | ||
166 | { | ||
167 | int r; | ||
168 | u32 tmp; | ||
169 | |||
170 | /* Setup GPU memory space */ | ||
171 | tmp = RREG32_MC(R_000100_MCCFG_FB_LOCATION); | ||
172 | rdev->mc.vram_location = G_000100_MC_FB_START(tmp) << 16; | ||
173 | rdev->mc.gtt_location = 0xFFFFFFFFUL; | ||
174 | r = radeon_mc_setup(rdev); | ||
175 | if (r) | ||
176 | return r; | ||
177 | return 0; | ||
178 | } | ||
179 | |||
164 | void rs690_line_buffer_adjust(struct radeon_device *rdev, | 180 | void rs690_line_buffer_adjust(struct radeon_device *rdev, |
165 | struct drm_display_mode *mode1, | 181 | struct drm_display_mode *mode1, |
166 | struct drm_display_mode *mode2) | 182 | struct drm_display_mode *mode2) |
@@ -605,7 +621,6 @@ static int rs690_startup(struct radeon_device *rdev) | |||
605 | if (r) | 621 | if (r) |
606 | return r; | 622 | return r; |
607 | /* Enable IRQ */ | 623 | /* Enable IRQ */ |
608 | rdev->irq.sw_int = true; | ||
609 | rs600_irq_set(rdev); | 624 | rs600_irq_set(rdev); |
610 | /* 1M ring buffer */ | 625 | /* 1M ring buffer */ |
611 | r = r100_cp_init(rdev, 1024 * 1024); | 626 | r = r100_cp_init(rdev, 1024 * 1024); |
@@ -662,7 +677,7 @@ void rs690_fini(struct radeon_device *rdev) | |||
662 | rs400_gart_fini(rdev); | 677 | rs400_gart_fini(rdev); |
663 | radeon_irq_kms_fini(rdev); | 678 | radeon_irq_kms_fini(rdev); |
664 | radeon_fence_driver_fini(rdev); | 679 | radeon_fence_driver_fini(rdev); |
665 | radeon_object_fini(rdev); | 680 | radeon_bo_fini(rdev); |
666 | radeon_atombios_fini(rdev); | 681 | radeon_atombios_fini(rdev); |
667 | kfree(rdev->bios); | 682 | kfree(rdev->bios); |
668 | rdev->bios = NULL; | 683 | rdev->bios = NULL; |
@@ -700,10 +715,9 @@ int rs690_init(struct radeon_device *rdev) | |||
700 | RREG32(R_0007C0_CP_STAT)); | 715 | RREG32(R_0007C0_CP_STAT)); |
701 | } | 716 | } |
702 | /* check if cards are posted or not */ | 717 | /* check if cards are posted or not */ |
703 | if (!radeon_card_posted(rdev) && rdev->bios) { | 718 | if (radeon_boot_test_post_card(rdev) == false) |
704 | DRM_INFO("GPU not posted. posting now...\n"); | 719 | return -EINVAL; |
705 | atom_asic_init(rdev->mode_info.atom_context); | 720 | |
706 | } | ||
707 | /* Initialize clocks */ | 721 | /* Initialize clocks */ |
708 | radeon_get_clock_info(rdev->ddev); | 722 | radeon_get_clock_info(rdev->ddev); |
709 | /* Initialize power management */ | 723 | /* Initialize power management */ |
@@ -711,7 +725,7 @@ int rs690_init(struct radeon_device *rdev) | |||
711 | /* Get vram informations */ | 725 | /* Get vram informations */ |
712 | rs690_vram_info(rdev); | 726 | rs690_vram_info(rdev); |
713 | /* Initialize memory controller (also test AGP) */ | 727 | /* Initialize memory controller (also test AGP) */ |
714 | r = r420_mc_init(rdev); | 728 | r = rs690_mc_init(rdev); |
715 | if (r) | 729 | if (r) |
716 | return r; | 730 | return r; |
717 | rv515_debugfs(rdev); | 731 | rv515_debugfs(rdev); |
@@ -723,7 +737,7 @@ int rs690_init(struct radeon_device *rdev) | |||
723 | if (r) | 737 | if (r) |
724 | return r; | 738 | return r; |
725 | /* Memory manager */ | 739 | /* Memory manager */ |
726 | r = radeon_object_init(rdev); | 740 | r = radeon_bo_init(rdev); |
727 | if (r) | 741 | if (r) |
728 | return r; | 742 | return r; |
729 | r = rs400_gart_init(rdev); | 743 | r = rs400_gart_init(rdev); |
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index 7935f793bf62..7793239e24b2 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c | |||
@@ -137,8 +137,6 @@ int rv515_mc_wait_for_idle(struct radeon_device *rdev) | |||
137 | 137 | ||
138 | void rv515_vga_render_disable(struct radeon_device *rdev) | 138 | void rv515_vga_render_disable(struct radeon_device *rdev) |
139 | { | 139 | { |
140 | WREG32(R_000330_D1VGA_CONTROL, 0); | ||
141 | WREG32(R_000338_D2VGA_CONTROL, 0); | ||
142 | WREG32(R_000300_VGA_RENDER_CONTROL, | 140 | WREG32(R_000300_VGA_RENDER_CONTROL, |
143 | RREG32(R_000300_VGA_RENDER_CONTROL) & C_000300_VGA_VSTATUS_CNTL); | 141 | RREG32(R_000300_VGA_RENDER_CONTROL) & C_000300_VGA_VSTATUS_CNTL); |
144 | } | 142 | } |
@@ -382,7 +380,6 @@ void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save) | |||
382 | save->d2crtc_control = RREG32(R_006880_D2CRTC_CONTROL); | 380 | save->d2crtc_control = RREG32(R_006880_D2CRTC_CONTROL); |
383 | 381 | ||
384 | /* Stop all video */ | 382 | /* Stop all video */ |
385 | WREG32(R_000330_D1VGA_CONTROL, 0); | ||
386 | WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0); | 383 | WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0); |
387 | WREG32(R_000300_VGA_RENDER_CONTROL, 0); | 384 | WREG32(R_000300_VGA_RENDER_CONTROL, 0); |
388 | WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 1); | 385 | WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 1); |
@@ -391,6 +388,8 @@ void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save) | |||
391 | WREG32(R_006880_D2CRTC_CONTROL, 0); | 388 | WREG32(R_006880_D2CRTC_CONTROL, 0); |
392 | WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 0); | 389 | WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 0); |
393 | WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0); | 390 | WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0); |
391 | WREG32(R_000330_D1VGA_CONTROL, 0); | ||
392 | WREG32(R_000338_D2VGA_CONTROL, 0); | ||
394 | } | 393 | } |
395 | 394 | ||
396 | void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save) | 395 | void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save) |
@@ -404,14 +403,14 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save) | |||
404 | WREG32(R_000328_VGA_HDP_CONTROL, save->vga_hdp_control); | 403 | WREG32(R_000328_VGA_HDP_CONTROL, save->vga_hdp_control); |
405 | mdelay(1); | 404 | mdelay(1); |
406 | /* Restore video state */ | 405 | /* Restore video state */ |
406 | WREG32(R_000330_D1VGA_CONTROL, save->d1vga_control); | ||
407 | WREG32(R_000338_D2VGA_CONTROL, save->d2vga_control); | ||
407 | WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 1); | 408 | WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 1); |
408 | WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 1); | 409 | WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 1); |
409 | WREG32(R_006080_D1CRTC_CONTROL, save->d1crtc_control); | 410 | WREG32(R_006080_D1CRTC_CONTROL, save->d1crtc_control); |
410 | WREG32(R_006880_D2CRTC_CONTROL, save->d2crtc_control); | 411 | WREG32(R_006880_D2CRTC_CONTROL, save->d2crtc_control); |
411 | WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 0); | 412 | WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 0); |
412 | WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0); | 413 | WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0); |
413 | WREG32(R_000330_D1VGA_CONTROL, save->d1vga_control); | ||
414 | WREG32(R_000338_D2VGA_CONTROL, save->d2vga_control); | ||
415 | WREG32(R_000300_VGA_RENDER_CONTROL, save->vga_render_control); | 414 | WREG32(R_000300_VGA_RENDER_CONTROL, save->vga_render_control); |
416 | } | 415 | } |
417 | 416 | ||
@@ -479,7 +478,6 @@ static int rv515_startup(struct radeon_device *rdev) | |||
479 | return r; | 478 | return r; |
480 | } | 479 | } |
481 | /* Enable IRQ */ | 480 | /* Enable IRQ */ |
482 | rdev->irq.sw_int = true; | ||
483 | rs600_irq_set(rdev); | 481 | rs600_irq_set(rdev); |
484 | /* 1M ring buffer */ | 482 | /* 1M ring buffer */ |
485 | r = r100_cp_init(rdev, 1024 * 1024); | 483 | r = r100_cp_init(rdev, 1024 * 1024); |
@@ -541,11 +539,11 @@ void rv515_fini(struct radeon_device *rdev) | |||
541 | r100_wb_fini(rdev); | 539 | r100_wb_fini(rdev); |
542 | r100_ib_fini(rdev); | 540 | r100_ib_fini(rdev); |
543 | radeon_gem_fini(rdev); | 541 | radeon_gem_fini(rdev); |
544 | rv370_pcie_gart_fini(rdev); | 542 | rv370_pcie_gart_fini(rdev); |
545 | radeon_agp_fini(rdev); | 543 | radeon_agp_fini(rdev); |
546 | radeon_irq_kms_fini(rdev); | 544 | radeon_irq_kms_fini(rdev); |
547 | radeon_fence_driver_fini(rdev); | 545 | radeon_fence_driver_fini(rdev); |
548 | radeon_object_fini(rdev); | 546 | radeon_bo_fini(rdev); |
549 | radeon_atombios_fini(rdev); | 547 | radeon_atombios_fini(rdev); |
550 | kfree(rdev->bios); | 548 | kfree(rdev->bios); |
551 | rdev->bios = NULL; | 549 | rdev->bios = NULL; |
@@ -581,10 +579,8 @@ int rv515_init(struct radeon_device *rdev) | |||
581 | RREG32(R_0007C0_CP_STAT)); | 579 | RREG32(R_0007C0_CP_STAT)); |
582 | } | 580 | } |
583 | /* check if cards are posted or not */ | 581 | /* check if cards are posted or not */ |
584 | if (!radeon_card_posted(rdev) && rdev->bios) { | 582 | if (radeon_boot_test_post_card(rdev) == false) |
585 | DRM_INFO("GPU not posted. posting now...\n"); | 583 | return -EINVAL; |
586 | atom_asic_init(rdev->mode_info.atom_context); | ||
587 | } | ||
588 | /* Initialize clocks */ | 584 | /* Initialize clocks */ |
589 | radeon_get_clock_info(rdev->ddev); | 585 | radeon_get_clock_info(rdev->ddev); |
590 | /* Initialize power management */ | 586 | /* Initialize power management */ |
@@ -604,7 +600,7 @@ int rv515_init(struct radeon_device *rdev) | |||
604 | if (r) | 600 | if (r) |
605 | return r; | 601 | return r; |
606 | /* Memory manager */ | 602 | /* Memory manager */ |
607 | r = radeon_object_init(rdev); | 603 | r = radeon_bo_init(rdev); |
608 | if (r) | 604 | if (r) |
609 | return r; | 605 | return r; |
610 | r = rv370_pcie_gart_init(rdev); | 606 | r = rv370_pcie_gart_init(rdev); |
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index b0efd0ddae7a..dd4f02096a80 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c | |||
@@ -92,7 +92,7 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev) | |||
92 | void rv770_pcie_gart_disable(struct radeon_device *rdev) | 92 | void rv770_pcie_gart_disable(struct radeon_device *rdev) |
93 | { | 93 | { |
94 | u32 tmp; | 94 | u32 tmp; |
95 | int i; | 95 | int i, r; |
96 | 96 | ||
97 | /* Disable all tables */ | 97 | /* Disable all tables */ |
98 | for (i = 0; i < 7; i++) | 98 | for (i = 0; i < 7; i++) |
@@ -113,8 +113,12 @@ void rv770_pcie_gart_disable(struct radeon_device *rdev) | |||
113 | WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); | 113 | WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); |
114 | WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp); | 114 | WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp); |
115 | if (rdev->gart.table.vram.robj) { | 115 | if (rdev->gart.table.vram.robj) { |
116 | radeon_object_kunmap(rdev->gart.table.vram.robj); | 116 | r = radeon_bo_reserve(rdev->gart.table.vram.robj, false); |
117 | radeon_object_unpin(rdev->gart.table.vram.robj); | 117 | if (likely(r == 0)) { |
118 | radeon_bo_kunmap(rdev->gart.table.vram.robj); | ||
119 | radeon_bo_unpin(rdev->gart.table.vram.robj); | ||
120 | radeon_bo_unreserve(rdev->gart.table.vram.robj); | ||
121 | } | ||
118 | } | 122 | } |
119 | } | 123 | } |
120 | 124 | ||
@@ -880,13 +884,26 @@ static int rv770_startup(struct radeon_device *rdev) | |||
880 | } | 884 | } |
881 | rv770_gpu_init(rdev); | 885 | rv770_gpu_init(rdev); |
882 | 886 | ||
883 | r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, | 887 | r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); |
884 | &rdev->r600_blit.shader_gpu_addr); | 888 | if (unlikely(r != 0)) |
889 | return r; | ||
890 | r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, | ||
891 | &rdev->r600_blit.shader_gpu_addr); | ||
892 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); | ||
885 | if (r) { | 893 | if (r) { |
886 | DRM_ERROR("failed to pin blit object %d\n", r); | 894 | DRM_ERROR("failed to pin blit object %d\n", r); |
887 | return r; | 895 | return r; |
888 | } | 896 | } |
889 | 897 | ||
898 | /* Enable IRQ */ | ||
899 | r = r600_irq_init(rdev); | ||
900 | if (r) { | ||
901 | DRM_ERROR("radeon: IH init failed (%d).\n", r); | ||
902 | radeon_irq_kms_fini(rdev); | ||
903 | return r; | ||
904 | } | ||
905 | r600_irq_set(rdev); | ||
906 | |||
890 | r = radeon_ring_init(rdev, rdev->cp.ring_size); | 907 | r = radeon_ring_init(rdev, rdev->cp.ring_size); |
891 | if (r) | 908 | if (r) |
892 | return r; | 909 | return r; |
@@ -934,13 +951,19 @@ int rv770_resume(struct radeon_device *rdev) | |||
934 | 951 | ||
935 | int rv770_suspend(struct radeon_device *rdev) | 952 | int rv770_suspend(struct radeon_device *rdev) |
936 | { | 953 | { |
954 | int r; | ||
955 | |||
937 | /* FIXME: we should wait for ring to be empty */ | 956 | /* FIXME: we should wait for ring to be empty */ |
938 | r700_cp_stop(rdev); | 957 | r700_cp_stop(rdev); |
939 | rdev->cp.ready = false; | 958 | rdev->cp.ready = false; |
940 | r600_wb_disable(rdev); | 959 | r600_wb_disable(rdev); |
941 | rv770_pcie_gart_disable(rdev); | 960 | rv770_pcie_gart_disable(rdev); |
942 | /* unpin shaders bo */ | 961 | /* unpin shaders bo */ |
943 | radeon_object_unpin(rdev->r600_blit.shader_obj); | 962 | r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); |
963 | if (likely(r == 0)) { | ||
964 | radeon_bo_unpin(rdev->r600_blit.shader_obj); | ||
965 | radeon_bo_unreserve(rdev->r600_blit.shader_obj); | ||
966 | } | ||
944 | return 0; | 967 | return 0; |
945 | } | 968 | } |
946 | 969 | ||
@@ -975,7 +998,11 @@ int rv770_init(struct radeon_device *rdev) | |||
975 | if (r) | 998 | if (r) |
976 | return r; | 999 | return r; |
977 | /* Post card if necessary */ | 1000 | /* Post card if necessary */ |
978 | if (!r600_card_posted(rdev) && rdev->bios) { | 1001 | if (!r600_card_posted(rdev)) { |
1002 | if (!rdev->bios) { | ||
1003 | dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n"); | ||
1004 | return -EINVAL; | ||
1005 | } | ||
979 | DRM_INFO("GPU not posted. posting now...\n"); | 1006 | DRM_INFO("GPU not posted. posting now...\n"); |
980 | atom_asic_init(rdev->mode_info.atom_context); | 1007 | atom_asic_init(rdev->mode_info.atom_context); |
981 | } | 1008 | } |
@@ -998,14 +1025,22 @@ int rv770_init(struct radeon_device *rdev) | |||
998 | if (r) | 1025 | if (r) |
999 | return r; | 1026 | return r; |
1000 | /* Memory manager */ | 1027 | /* Memory manager */ |
1001 | r = radeon_object_init(rdev); | 1028 | r = radeon_bo_init(rdev); |
1029 | if (r) | ||
1030 | return r; | ||
1031 | |||
1032 | r = radeon_irq_kms_init(rdev); | ||
1002 | if (r) | 1033 | if (r) |
1003 | return r; | 1034 | return r; |
1035 | |||
1004 | rdev->cp.ring_obj = NULL; | 1036 | rdev->cp.ring_obj = NULL; |
1005 | r600_ring_init(rdev, 1024 * 1024); | 1037 | r600_ring_init(rdev, 1024 * 1024); |
1006 | 1038 | ||
1007 | if (!rdev->me_fw || !rdev->pfp_fw) { | 1039 | rdev->ih.ring_obj = NULL; |
1008 | r = r600_cp_init_microcode(rdev); | 1040 | r600_ih_ring_init(rdev, 64 * 1024); |
1041 | |||
1042 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { | ||
1043 | r = r600_init_microcode(rdev); | ||
1009 | if (r) { | 1044 | if (r) { |
1010 | DRM_ERROR("Failed to load firmware!\n"); | 1045 | DRM_ERROR("Failed to load firmware!\n"); |
1011 | return r; | 1046 | return r; |
@@ -1051,6 +1086,8 @@ void rv770_fini(struct radeon_device *rdev) | |||
1051 | rv770_suspend(rdev); | 1086 | rv770_suspend(rdev); |
1052 | 1087 | ||
1053 | r600_blit_fini(rdev); | 1088 | r600_blit_fini(rdev); |
1089 | r600_irq_fini(rdev); | ||
1090 | radeon_irq_kms_fini(rdev); | ||
1054 | radeon_ring_fini(rdev); | 1091 | radeon_ring_fini(rdev); |
1055 | r600_wb_fini(rdev); | 1092 | r600_wb_fini(rdev); |
1056 | rv770_pcie_gart_fini(rdev); | 1093 | rv770_pcie_gart_fini(rdev); |
@@ -1059,7 +1096,7 @@ void rv770_fini(struct radeon_device *rdev) | |||
1059 | radeon_clocks_fini(rdev); | 1096 | radeon_clocks_fini(rdev); |
1060 | if (rdev->flags & RADEON_IS_AGP) | 1097 | if (rdev->flags & RADEON_IS_AGP) |
1061 | radeon_agp_fini(rdev); | 1098 | radeon_agp_fini(rdev); |
1062 | radeon_object_fini(rdev); | 1099 | radeon_bo_fini(rdev); |
1063 | radeon_atombios_fini(rdev); | 1100 | radeon_atombios_fini(rdev); |
1064 | kfree(rdev->bios); | 1101 | kfree(rdev->bios); |
1065 | rdev->bios = NULL; | 1102 | rdev->bios = NULL; |
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile index b0a9de7a57c2..1e138f5bae09 100644 --- a/drivers/gpu/drm/ttm/Makefile +++ b/drivers/gpu/drm/ttm/Makefile | |||
@@ -3,6 +3,7 @@ | |||
3 | 3 | ||
4 | ccflags-y := -Iinclude/drm | 4 | ccflags-y := -Iinclude/drm |
5 | ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \ | 5 | ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \ |
6 | ttm_bo_util.o ttm_bo_vm.o ttm_module.o ttm_global.o | 6 | ttm_bo_util.o ttm_bo_vm.o ttm_module.o ttm_global.o \ |
7 | ttm_object.o ttm_lock.o ttm_execbuf_util.o | ||
7 | 8 | ||
8 | obj-$(CONFIG_DRM_TTM) += ttm.o | 9 | obj-$(CONFIG_DRM_TTM) += ttm.o |
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 87c06252d464..e13fd23f3334 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c | |||
@@ -275,9 +275,10 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc) | |||
275 | bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT, | 275 | bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT, |
276 | page_flags | TTM_PAGE_FLAG_USER, | 276 | page_flags | TTM_PAGE_FLAG_USER, |
277 | glob->dummy_read_page); | 277 | glob->dummy_read_page); |
278 | if (unlikely(bo->ttm == NULL)) | 278 | if (unlikely(bo->ttm == NULL)) { |
279 | ret = -ENOMEM; | 279 | ret = -ENOMEM; |
280 | break; | 280 | break; |
281 | } | ||
281 | 282 | ||
282 | ret = ttm_tt_set_user(bo->ttm, current, | 283 | ret = ttm_tt_set_user(bo->ttm, current, |
283 | bo->buffer_start, bo->num_pages); | 284 | bo->buffer_start, bo->num_pages); |
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index c70927ecda21..ceae52f45c39 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c | |||
@@ -369,6 +369,7 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp) | |||
369 | #endif | 369 | #endif |
370 | return tmp; | 370 | return tmp; |
371 | } | 371 | } |
372 | EXPORT_SYMBOL(ttm_io_prot); | ||
372 | 373 | ||
373 | static int ttm_bo_ioremap(struct ttm_buffer_object *bo, | 374 | static int ttm_bo_ioremap(struct ttm_buffer_object *bo, |
374 | unsigned long bus_base, | 375 | unsigned long bus_base, |
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c new file mode 100644 index 000000000000..c285c2902d15 --- /dev/null +++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c | |||
@@ -0,0 +1,117 @@ | |||
1 | /************************************************************************** | ||
2 | * | ||
3 | * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA | ||
4 | * All Rights Reserved. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the | ||
8 | * "Software"), to deal in the Software without restriction, including | ||
9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
10 | * distribute, sub license, and/or sell copies of the Software, and to | ||
11 | * permit persons to whom the Software is furnished to do so, subject to | ||
12 | * the following conditions: | ||
13 | * | ||
14 | * The above copyright notice and this permission notice (including the | ||
15 | * next paragraph) shall be included in all copies or substantial portions | ||
16 | * of the Software. | ||
17 | * | ||
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
25 | * | ||
26 | **************************************************************************/ | ||
27 | |||
28 | #include "ttm/ttm_execbuf_util.h" | ||
29 | #include "ttm/ttm_bo_driver.h" | ||
30 | #include "ttm/ttm_placement.h" | ||
31 | #include <linux/wait.h> | ||
32 | #include <linux/sched.h> | ||
33 | #include <linux/module.h> | ||
34 | |||
35 | void ttm_eu_backoff_reservation(struct list_head *list) | ||
36 | { | ||
37 | struct ttm_validate_buffer *entry; | ||
38 | |||
39 | list_for_each_entry(entry, list, head) { | ||
40 | struct ttm_buffer_object *bo = entry->bo; | ||
41 | if (!entry->reserved) | ||
42 | continue; | ||
43 | |||
44 | entry->reserved = false; | ||
45 | ttm_bo_unreserve(bo); | ||
46 | } | ||
47 | } | ||
48 | EXPORT_SYMBOL(ttm_eu_backoff_reservation); | ||
49 | |||
50 | /* | ||
51 | * Reserve buffers for validation. | ||
52 | * | ||
53 | * If a buffer in the list is marked for CPU access, we back off and | ||
54 | * wait for that buffer to become free for GPU access. | ||
55 | * | ||
56 | * If a buffer is reserved for another validation, the validator with | ||
57 | * the highest validation sequence backs off and waits for that buffer | ||
58 | * to become unreserved. This prevents deadlocks when validating multiple | ||
59 | * buffers in different orders. | ||
60 | */ | ||
61 | |||
62 | int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq) | ||
63 | { | ||
64 | struct ttm_validate_buffer *entry; | ||
65 | int ret; | ||
66 | |||
67 | retry: | ||
68 | list_for_each_entry(entry, list, head) { | ||
69 | struct ttm_buffer_object *bo = entry->bo; | ||
70 | |||
71 | entry->reserved = false; | ||
72 | ret = ttm_bo_reserve(bo, true, false, true, val_seq); | ||
73 | if (ret != 0) { | ||
74 | ttm_eu_backoff_reservation(list); | ||
75 | if (ret == -EAGAIN) { | ||
76 | ret = ttm_bo_wait_unreserved(bo, true); | ||
77 | if (unlikely(ret != 0)) | ||
78 | return ret; | ||
79 | goto retry; | ||
80 | } else | ||
81 | return ret; | ||
82 | } | ||
83 | |||
84 | entry->reserved = true; | ||
85 | if (unlikely(atomic_read(&bo->cpu_writers) > 0)) { | ||
86 | ttm_eu_backoff_reservation(list); | ||
87 | ret = ttm_bo_wait_cpu(bo, false); | ||
88 | if (ret) | ||
89 | return ret; | ||
90 | goto retry; | ||
91 | } | ||
92 | } | ||
93 | return 0; | ||
94 | } | ||
95 | EXPORT_SYMBOL(ttm_eu_reserve_buffers); | ||
96 | |||
97 | void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj) | ||
98 | { | ||
99 | struct ttm_validate_buffer *entry; | ||
100 | |||
101 | list_for_each_entry(entry, list, head) { | ||
102 | struct ttm_buffer_object *bo = entry->bo; | ||
103 | struct ttm_bo_driver *driver = bo->bdev->driver; | ||
104 | void *old_sync_obj; | ||
105 | |||
106 | spin_lock(&bo->lock); | ||
107 | old_sync_obj = bo->sync_obj; | ||
108 | bo->sync_obj = driver->sync_obj_ref(sync_obj); | ||
109 | bo->sync_obj_arg = entry->new_sync_obj_arg; | ||
110 | spin_unlock(&bo->lock); | ||
111 | ttm_bo_unreserve(bo); | ||
112 | entry->reserved = false; | ||
113 | if (old_sync_obj) | ||
114 | driver->sync_obj_unref(&old_sync_obj); | ||
115 | } | ||
116 | } | ||
117 | EXPORT_SYMBOL(ttm_eu_fence_buffer_objects); | ||
diff --git a/drivers/gpu/drm/ttm/ttm_lock.c b/drivers/gpu/drm/ttm/ttm_lock.c new file mode 100644 index 000000000000..f619ebcaa4ec --- /dev/null +++ b/drivers/gpu/drm/ttm/ttm_lock.c | |||
@@ -0,0 +1,311 @@ | |||
1 | /************************************************************************** | ||
2 | * | ||
3 | * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA | ||
4 | * All Rights Reserved. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the | ||
8 | * "Software"), to deal in the Software without restriction, including | ||
9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
10 | * distribute, sub license, and/or sell copies of the Software, and to | ||
11 | * permit persons to whom the Software is furnished to do so, subject to | ||
12 | * the following conditions: | ||
13 | * | ||
14 | * The above copyright notice and this permission notice (including the | ||
15 | * next paragraph) shall be included in all copies or substantial portions | ||
16 | * of the Software. | ||
17 | * | ||
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
25 | * | ||
26 | **************************************************************************/ | ||
27 | /* | ||
28 | * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> | ||
29 | */ | ||
30 | |||
31 | #include "ttm/ttm_lock.h" | ||
32 | #include "ttm/ttm_module.h" | ||
33 | #include <asm/atomic.h> | ||
34 | #include <linux/errno.h> | ||
35 | #include <linux/wait.h> | ||
36 | #include <linux/sched.h> | ||
37 | #include <linux/module.h> | ||
38 | |||
39 | #define TTM_WRITE_LOCK_PENDING (1 << 0) | ||
40 | #define TTM_VT_LOCK_PENDING (1 << 1) | ||
41 | #define TTM_SUSPEND_LOCK_PENDING (1 << 2) | ||
42 | #define TTM_VT_LOCK (1 << 3) | ||
43 | #define TTM_SUSPEND_LOCK (1 << 4) | ||
44 | |||
45 | void ttm_lock_init(struct ttm_lock *lock) | ||
46 | { | ||
47 | spin_lock_init(&lock->lock); | ||
48 | init_waitqueue_head(&lock->queue); | ||
49 | lock->rw = 0; | ||
50 | lock->flags = 0; | ||
51 | lock->kill_takers = false; | ||
52 | lock->signal = SIGKILL; | ||
53 | } | ||
54 | EXPORT_SYMBOL(ttm_lock_init); | ||
55 | |||
56 | void ttm_read_unlock(struct ttm_lock *lock) | ||
57 | { | ||
58 | spin_lock(&lock->lock); | ||
59 | if (--lock->rw == 0) | ||
60 | wake_up_all(&lock->queue); | ||
61 | spin_unlock(&lock->lock); | ||
62 | } | ||
63 | EXPORT_SYMBOL(ttm_read_unlock); | ||
64 | |||
65 | static bool __ttm_read_lock(struct ttm_lock *lock) | ||
66 | { | ||
67 | bool locked = false; | ||
68 | |||
69 | spin_lock(&lock->lock); | ||
70 | if (unlikely(lock->kill_takers)) { | ||
71 | send_sig(lock->signal, current, 0); | ||
72 | spin_unlock(&lock->lock); | ||
73 | return false; | ||
74 | } | ||
75 | if (lock->rw >= 0 && lock->flags == 0) { | ||
76 | ++lock->rw; | ||
77 | locked = true; | ||
78 | } | ||
79 | spin_unlock(&lock->lock); | ||
80 | return locked; | ||
81 | } | ||
82 | |||
83 | int ttm_read_lock(struct ttm_lock *lock, bool interruptible) | ||
84 | { | ||
85 | int ret = 0; | ||
86 | |||
87 | if (interruptible) | ||
88 | ret = wait_event_interruptible(lock->queue, | ||
89 | __ttm_read_lock(lock)); | ||
90 | else | ||
91 | wait_event(lock->queue, __ttm_read_lock(lock)); | ||
92 | return ret; | ||
93 | } | ||
94 | EXPORT_SYMBOL(ttm_read_lock); | ||
95 | |||
/*
 * __ttm_read_trylock - single non-blocking read-lock attempt.
 *
 * @locked: set to true only if the read lock was actually taken.
 *
 * Returns true ("don't block") either when the lock was taken, or when
 * flags == 0 but a writer currently holds the lock (rw < 0) - in that
 * case the caller should give up with -EBUSY rather than wait. Returns
 * false ("keep blocking") while an exclusive lock is held or pending.
 */
static bool __ttm_read_trylock(struct ttm_lock *lock, bool *locked)
{
	bool block = true;

	*locked = false;

	spin_lock(&lock->lock);
	if (unlikely(lock->kill_takers)) {
		send_sig(lock->signal, current, 0);
		spin_unlock(&lock->lock);
		return false;
	}
	if (lock->rw >= 0 && lock->flags == 0) {
		/* No writer and nothing exclusive pending: take it. */
		++lock->rw;
		block = false;
		*locked = true;
	} else if (lock->flags == 0) {
		/* Held for write but nothing pending: fail fast. */
		block = false;
	}
	spin_unlock(&lock->lock);

	return !block;
}
119 | |||
/*
 * ttm_read_trylock - try to take a read lock without waiting for a
 * write holder to release it.
 *
 * Sleeps only while an exclusive (vt/suspend/write-pending) lock is in
 * effect. Returns 0 with the read lock held, -EBUSY if the lock is
 * write-held, or -ERESTARTSYS if interrupted (interruptible waits only).
 */
int ttm_read_trylock(struct ttm_lock *lock, bool interruptible)
{
	int ret = 0;
	bool locked;

	if (interruptible)
		ret = wait_event_interruptible
			(lock->queue, __ttm_read_trylock(lock, &locked));
	else
		wait_event(lock->queue, __ttm_read_trylock(lock, &locked));

	if (unlikely(ret != 0)) {
		/* Interrupted: the condition last evaluated false, so the
		 * lock cannot have been taken. */
		BUG_ON(locked);
		return ret;
	}

	return (locked) ? 0 : -EBUSY;
}
138 | |||
/*
 * ttm_write_unlock - release the write lock (rw == -1) and wake all
 * waiters so readers or another pending exclusive taker can proceed.
 */
void ttm_write_unlock(struct ttm_lock *lock)
{
	spin_lock(&lock->lock);
	lock->rw = 0;
	wake_up_all(&lock->queue);
	spin_unlock(&lock->lock);
}
EXPORT_SYMBOL(ttm_write_unlock);
147 | |||
/*
 * __ttm_write_lock - single attempt to take the write lock.
 *
 * Succeeds only when there are no readers or writer (rw == 0) and no
 * other exclusive lock (vt/suspend) is held or pending. On failure the
 * TTM_WRITE_LOCK_PENDING flag is raised so new readers are kept out
 * until this writer gets its turn (writer preference).
 */
static bool __ttm_write_lock(struct ttm_lock *lock)
{
	bool locked = false;

	spin_lock(&lock->lock);
	if (unlikely(lock->kill_takers)) {
		send_sig(lock->signal, current, 0);
		spin_unlock(&lock->lock);
		return false;
	}
	if (lock->rw == 0 && ((lock->flags & ~TTM_WRITE_LOCK_PENDING) == 0)) {
		lock->rw = -1;
		lock->flags &= ~TTM_WRITE_LOCK_PENDING;
		locked = true;
	} else {
		lock->flags |= TTM_WRITE_LOCK_PENDING;
	}
	spin_unlock(&lock->lock);
	return locked;
}
168 | |||
169 | int ttm_write_lock(struct ttm_lock *lock, bool interruptible) | ||
170 | { | ||
171 | int ret = 0; | ||
172 | |||
173 | if (interruptible) { | ||
174 | ret = wait_event_interruptible(lock->queue, | ||
175 | __ttm_write_lock(lock)); | ||
176 | if (unlikely(ret != 0)) { | ||
177 | spin_lock(&lock->lock); | ||
178 | lock->flags &= ~TTM_WRITE_LOCK_PENDING; | ||
179 | wake_up_all(&lock->queue); | ||
180 | spin_unlock(&lock->lock); | ||
181 | } | ||
182 | } else | ||
183 | wait_event(lock->queue, __ttm_read_lock(lock)); | ||
184 | |||
185 | return ret; | ||
186 | } | ||
187 | EXPORT_SYMBOL(ttm_write_lock); | ||
188 | |||
/*
 * ttm_write_lock_downgrade - atomically convert a held write lock
 * (rw == -1) into a single read lock (rw == 1), waking waiters so
 * other readers may join.
 */
void ttm_write_lock_downgrade(struct ttm_lock *lock)
{
	spin_lock(&lock->lock);
	lock->rw = 1;
	wake_up_all(&lock->queue);
	spin_unlock(&lock->lock);
}
196 | |||
/*
 * __ttm_vt_unlock - drop the vt lock flag and wake waiters.
 *
 * Returns -EINVAL if the vt lock was not actually held (the flag is
 * cleared regardless, so the lock ends up consistent either way).
 */
static int __ttm_vt_unlock(struct ttm_lock *lock)
{
	int ret = 0;

	spin_lock(&lock->lock);
	if (unlikely(!(lock->flags & TTM_VT_LOCK)))
		ret = -EINVAL;
	lock->flags &= ~TTM_VT_LOCK;
	wake_up_all(&lock->queue);
	spin_unlock(&lock->lock);
	printk(KERN_INFO TTM_PFX "vt unlock.\n");

	return ret;
}
211 | |||
/*
 * ttm_vt_lock_remove - base-object destructor for the vt lock.
 *
 * Runs when the last reference to the embedded base object goes away
 * (including client death), guaranteeing the vt lock is released.
 * Unlocking must succeed here since the base object existed only while
 * the vt lock was held.
 */
static void ttm_vt_lock_remove(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct ttm_lock *lock = container_of(base, struct ttm_lock, base);
	int ret;

	*p_base = NULL;
	ret = __ttm_vt_unlock(lock);
	BUG_ON(ret != 0);
}
222 | |||
223 | static bool __ttm_vt_lock(struct ttm_lock *lock) | ||
224 | { | ||
225 | bool locked = false; | ||
226 | |||
227 | spin_lock(&lock->lock); | ||
228 | if (lock->rw == 0) { | ||
229 | lock->flags &= ~TTM_VT_LOCK_PENDING; | ||
230 | lock->flags |= TTM_VT_LOCK; | ||
231 | locked = true; | ||
232 | } else { | ||
233 | lock->flags |= TTM_VT_LOCK_PENDING; | ||
234 | } | ||
235 | spin_unlock(&lock->lock); | ||
236 | return locked; | ||
237 | } | ||
238 | |||
/*
 * ttm_vt_lock - take the vt lock on behalf of a client file.
 *
 * @lock: the lock.
 * @interruptible: whether the wait may be interrupted by signals.
 * @tfile: the client's object file; recorded as the vt lock holder.
 *
 * Returns 0 on success, -ERESTARTSYS if interrupted, or an error from
 * ttm_base_object_init() (in which case the lock is released again).
 */
int ttm_vt_lock(struct ttm_lock *lock,
		bool interruptible,
		struct ttm_object_file *tfile)
{
	int ret = 0;

	if (interruptible) {
		ret = wait_event_interruptible(lock->queue,
					       __ttm_vt_lock(lock));
		if (unlikely(ret != 0)) {
			/* Interrupted: clear our pending request and wake
			 * anyone we were blocking. */
			spin_lock(&lock->lock);
			lock->flags &= ~TTM_VT_LOCK_PENDING;
			wake_up_all(&lock->queue);
			spin_unlock(&lock->lock);
			return ret;
		}
	} else
		wait_event(lock->queue, __ttm_vt_lock(lock));

	/*
	 * Add a base-object, the destructor of which will
	 * make sure the lock is released if the client dies
	 * while holding it.
	 */

	ret = ttm_base_object_init(tfile, &lock->base, false,
				   ttm_lock_type, &ttm_vt_lock_remove, NULL);
	if (ret)
		(void)__ttm_vt_unlock(lock);
	else {
		lock->vt_holder = tfile;
		printk(KERN_INFO TTM_PFX "vt lock.\n");
	}

	return ret;
}
EXPORT_SYMBOL(ttm_vt_lock);
276 | |||
/*
 * ttm_vt_unlock - release the vt lock by dropping the holder's usage
 * reference on the embedded base object; the actual unlock happens in
 * ttm_vt_lock_remove() when that last reference goes away.
 */
int ttm_vt_unlock(struct ttm_lock *lock)
{
	return ttm_ref_object_base_unref(lock->vt_holder,
					 lock->base.hash.key, TTM_REF_USAGE);
}
EXPORT_SYMBOL(ttm_vt_unlock);
283 | |||
/*
 * ttm_suspend_unlock - drop the suspend lock flag and wake waiters.
 */
void ttm_suspend_unlock(struct ttm_lock *lock)
{
	spin_lock(&lock->lock);
	lock->flags &= ~TTM_SUSPEND_LOCK;
	wake_up_all(&lock->queue);
	spin_unlock(&lock->lock);
}
291 | |||
292 | static bool __ttm_suspend_lock(struct ttm_lock *lock) | ||
293 | { | ||
294 | bool locked = false; | ||
295 | |||
296 | spin_lock(&lock->lock); | ||
297 | if (lock->rw == 0) { | ||
298 | lock->flags &= ~TTM_SUSPEND_LOCK_PENDING; | ||
299 | lock->flags |= TTM_SUSPEND_LOCK; | ||
300 | locked = true; | ||
301 | } else { | ||
302 | lock->flags |= TTM_SUSPEND_LOCK_PENDING; | ||
303 | } | ||
304 | spin_unlock(&lock->lock); | ||
305 | return locked; | ||
306 | } | ||
307 | |||
/*
 * ttm_suspend_lock - sleep (uninterruptibly) until the suspend lock is
 * taken; used on the suspend path where signals must not abort it.
 */
void ttm_suspend_lock(struct ttm_lock *lock)
{
	wait_event(lock->queue, __ttm_suspend_lock(lock));
}
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c index 072c281a6bb5..8bfde5f40841 100644 --- a/drivers/gpu/drm/ttm/ttm_memory.c +++ b/drivers/gpu/drm/ttm/ttm_memory.c | |||
@@ -274,16 +274,17 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob, | |||
274 | static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob, | 274 | static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob, |
275 | const struct sysinfo *si) | 275 | const struct sysinfo *si) |
276 | { | 276 | { |
277 | struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL); | 277 | struct ttm_mem_zone *zone; |
278 | uint64_t mem; | 278 | uint64_t mem; |
279 | int ret; | 279 | int ret; |
280 | 280 | ||
281 | if (unlikely(!zone)) | ||
282 | return -ENOMEM; | ||
283 | |||
284 | if (si->totalhigh == 0) | 281 | if (si->totalhigh == 0) |
285 | return 0; | 282 | return 0; |
286 | 283 | ||
284 | zone = kzalloc(sizeof(*zone), GFP_KERNEL); | ||
285 | if (unlikely(!zone)) | ||
286 | return -ENOMEM; | ||
287 | |||
287 | mem = si->totalram; | 288 | mem = si->totalram; |
288 | mem *= si->mem_unit; | 289 | mem *= si->mem_unit; |
289 | 290 | ||
@@ -460,6 +461,7 @@ void ttm_mem_global_free(struct ttm_mem_global *glob, | |||
460 | { | 461 | { |
461 | return ttm_mem_global_free_zone(glob, NULL, amount); | 462 | return ttm_mem_global_free_zone(glob, NULL, amount); |
462 | } | 463 | } |
464 | EXPORT_SYMBOL(ttm_mem_global_free); | ||
463 | 465 | ||
464 | static int ttm_mem_global_reserve(struct ttm_mem_global *glob, | 466 | static int ttm_mem_global_reserve(struct ttm_mem_global *glob, |
465 | struct ttm_mem_zone *single_zone, | 467 | struct ttm_mem_zone *single_zone, |
@@ -533,6 +535,7 @@ int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory, | |||
533 | return ttm_mem_global_alloc_zone(glob, NULL, memory, no_wait, | 535 | return ttm_mem_global_alloc_zone(glob, NULL, memory, no_wait, |
534 | interruptible); | 536 | interruptible); |
535 | } | 537 | } |
538 | EXPORT_SYMBOL(ttm_mem_global_alloc); | ||
536 | 539 | ||
537 | int ttm_mem_global_alloc_page(struct ttm_mem_global *glob, | 540 | int ttm_mem_global_alloc_page(struct ttm_mem_global *glob, |
538 | struct page *page, | 541 | struct page *page, |
@@ -588,3 +591,4 @@ size_t ttm_round_pot(size_t size) | |||
588 | } | 591 | } |
589 | return 0; | 592 | return 0; |
590 | } | 593 | } |
594 | EXPORT_SYMBOL(ttm_round_pot); | ||
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c new file mode 100644 index 000000000000..1099abac824b --- /dev/null +++ b/drivers/gpu/drm/ttm/ttm_object.c | |||
@@ -0,0 +1,452 @@ | |||
1 | /************************************************************************** | ||
2 | * | ||
3 | * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA | ||
4 | * All Rights Reserved. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the | ||
8 | * "Software"), to deal in the Software without restriction, including | ||
9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
10 | * distribute, sub license, and/or sell copies of the Software, and to | ||
11 | * permit persons to whom the Software is furnished to do so, subject to | ||
12 | * the following conditions: | ||
13 | * | ||
14 | * The above copyright notice and this permission notice (including the | ||
15 | * next paragraph) shall be included in all copies or substantial portions | ||
16 | * of the Software. | ||
17 | * | ||
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
25 | * | ||
26 | **************************************************************************/ | ||
27 | /* | ||
28 | * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> | ||
29 | */ | ||
30 | /** @file ttm_ref_object.c | ||
31 | * | ||
32 | * Base- and reference object implementation for the various | ||
33 | * ttm objects. Implements reference counting, minimal security checks | ||
34 | * and release on file close. | ||
35 | */ | ||
36 | |||
37 | /** | ||
38 | * struct ttm_object_file | ||
39 | * | ||
40 | * @tdev: Pointer to the ttm_object_device. | ||
41 | * | ||
42 | * @lock: Lock that protects the ref_list list and the | ||
43 | * ref_hash hash tables. | ||
44 | * | ||
45 | * @ref_list: List of ttm_ref_objects to be destroyed at | ||
46 | * file release. | ||
47 | * | ||
48 | * @ref_hash: Hash tables of ref objects, one per ttm_ref_type, | ||
49 | * for fast lookup of ref objects given a base object. | ||
50 | */ | ||
51 | |||
52 | #include "ttm/ttm_object.h" | ||
53 | #include "ttm/ttm_module.h" | ||
54 | #include <linux/list.h> | ||
55 | #include <linux/spinlock.h> | ||
56 | #include <linux/slab.h> | ||
57 | #include <linux/module.h> | ||
58 | #include <asm/atomic.h> | ||
59 | |||
struct ttm_object_file {
	struct ttm_object_device *tdev;
	rwlock_t lock;
	struct list_head ref_list;
	struct drm_open_hash ref_hash[TTM_REF_NUM];
	struct kref refcount;	/* Freed via ttm_object_file_destroy(). */
};
67 | |||
68 | /** | ||
69 | * struct ttm_object_device | ||
70 | * | ||
71 | * @object_lock: lock that protects the object_hash hash table. | ||
72 | * | ||
73 | * @object_hash: hash table for fast lookup of object global names. | ||
74 | * | ||
75 | * @object_count: Per device object count. | ||
76 | * | ||
77 | * This is the per-device data structure needed for ttm object management. | ||
78 | */ | ||
79 | |||
struct ttm_object_device {
	rwlock_t object_lock;
	struct drm_open_hash object_hash;
	atomic_t object_count;
	struct ttm_mem_global *mem_glob;	/* Accounts ref-object memory. */
};
86 | |||
87 | /** | ||
88 | * struct ttm_ref_object | ||
89 | * | ||
90 | * @hash: Hash entry for the per-file object reference hash. | ||
91 | * | ||
92 | * @head: List entry for the per-file list of ref-objects. | ||
93 | * | ||
94 | * @kref: Ref count. | ||
95 | * | ||
96 | * @obj: Base object this ref object is referencing. | ||
97 | * | ||
98 | * @ref_type: Type of ref object. | ||
99 | * | ||
100 | * This is similar to an idr object, but it also has a hash table entry | ||
101 | * that allows lookup with a pointer to the referenced object as a key. In | ||
102 | * that way, one can easily detect whether a base object is referenced by | ||
103 | * a particular ttm_object_file. It also carries a ref count to avoid creating | ||
104 | * multiple ref objects if a ttm_object_file references the same base | ||
105 | * object more than once. | ||
106 | */ | ||
107 | |||
struct ttm_ref_object {
	struct drm_hash_item hash;
	struct list_head head;
	struct kref kref;
	struct ttm_base_object *obj;
	enum ttm_ref_type ref_type;
	struct ttm_object_file *tfile;	/* Owning file; needed at release. */
};
116 | |||
/* Take a reference on @tfile and return it, for convenient chaining. */
static inline struct ttm_object_file *
ttm_object_file_ref(struct ttm_object_file *tfile)
{
	kref_get(&tfile->refcount);
	return tfile;
}
123 | |||
/* kref release callback: frees the object file on last unref. */
static void ttm_object_file_destroy(struct kref *kref)
{
	struct ttm_object_file *tfile =
		container_of(kref, struct ttm_object_file, refcount);

	kfree(tfile);
}
131 | |||
132 | |||
/* Drop a reference on *@p_tfile and clear the caller's pointer. */
static inline void ttm_object_file_unref(struct ttm_object_file **p_tfile)
{
	struct ttm_object_file *tfile = *p_tfile;

	*p_tfile = NULL;
	kref_put(&tfile->refcount, ttm_object_file_destroy);
}
140 | |||
141 | |||
/*
 * ttm_base_object_init - initialize and register a base object.
 *
 * @tfile: the object file that creates (and initially references) it.
 * @base: caller-embedded base object to initialize.
 * @shareable: whether other object files may look this object up.
 * @object_type: type tag stored on the object.
 * @refcount_release: called when the object's refcount hits zero.
 * @ref_obj_release: called when a non-USAGE ref object is destroyed.
 *
 * On success the object's only reference is the TTM_REF_USAGE ref
 * object held by @tfile: the initial kref from kref_init() is handed
 * over by the ttm_base_object_unref() below, so releasing the usage
 * ref destroys the object. Returns 0 or a negative error code.
 */
int ttm_base_object_init(struct ttm_object_file *tfile,
			 struct ttm_base_object *base,
			 bool shareable,
			 enum ttm_object_type object_type,
			 void (*refcount_release) (struct ttm_base_object **),
			 void (*ref_obj_release) (struct ttm_base_object *,
						  enum ttm_ref_type ref_type))
{
	struct ttm_object_device *tdev = tfile->tdev;
	int ret;

	base->shareable = shareable;
	base->tfile = ttm_object_file_ref(tfile);
	base->refcount_release = refcount_release;
	base->ref_obj_release = ref_obj_release;
	base->object_type = object_type;
	write_lock(&tdev->object_lock);
	kref_init(&base->refcount);
	/* Pick a unique 31-bit global name (hash key) for the object. */
	ret = drm_ht_just_insert_please(&tdev->object_hash,
					&base->hash,
					(unsigned long)base, 31, 0, 0);
	write_unlock(&tdev->object_lock);
	if (unlikely(ret != 0))
		goto out_err0;

	/* The creator's usage ref takes an additional refcount... */
	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
	if (unlikely(ret != 0))
		goto out_err1;

	/* ...and here we drop the initial one, transferring ownership. */
	ttm_base_object_unref(&base);

	return 0;
out_err1:
	(void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
out_err0:
	return ret;
}
EXPORT_SYMBOL(ttm_base_object_init);
180 | |||
/*
 * ttm_release_base - kref release callback for base objects.
 *
 * Called with tdev->object_lock write-held (see ttm_base_object_unref).
 * The object is first unhashed so no new lookups can find it; the lock
 * is then dropped around the driver's refcount_release, which may
 * sleep or free the object, and retaken for the caller.
 *
 * NOTE(review): the tfile reference taken in ttm_base_object_init() is
 * only dropped when refcount_release is non-NULL - presumably all
 * users supply one; verify against callers.
 */
static void ttm_release_base(struct kref *kref)
{
	struct ttm_base_object *base =
		container_of(kref, struct ttm_base_object, refcount);
	struct ttm_object_device *tdev = base->tfile->tdev;

	(void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
	write_unlock(&tdev->object_lock);
	if (base->refcount_release) {
		ttm_object_file_unref(&base->tfile);
		base->refcount_release(&base);
	}
	write_lock(&tdev->object_lock);
}
195 | |||
/*
 * ttm_base_object_unref - drop a reference on *@p_base and clear the
 * caller's pointer; the object is destroyed on the last unref.
 */
void ttm_base_object_unref(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct ttm_object_device *tdev = base->tfile->tdev;

	*p_base = NULL;

	/*
	 * Need to take the lock here to avoid racing with
	 * users trying to look up the object.
	 */

	write_lock(&tdev->object_lock);
	(void)kref_put(&base->refcount, &ttm_release_base);
	write_unlock(&tdev->object_lock);
}
EXPORT_SYMBOL(ttm_base_object_unref);
213 | |||
/*
 * ttm_base_object_lookup - look up a base object by its global name.
 *
 * @tfile: the looking-up client's object file, used for the
 * shareability check.
 * @key: the object's global name (hash key).
 *
 * Returns the object with an extra reference held, or NULL if the key
 * is unknown or the object is not shareable with @tfile. The refcount
 * is bumped under object_lock, so a concurrent final unref cannot free
 * the object between find and kref_get.
 */
struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
					       uint32_t key)
{
	struct ttm_object_device *tdev = tfile->tdev;
	struct ttm_base_object *base;
	struct drm_hash_item *hash;
	int ret;

	read_lock(&tdev->object_lock);
	ret = drm_ht_find_item(&tdev->object_hash, key, &hash);

	if (likely(ret == 0)) {
		base = drm_hash_entry(hash, struct ttm_base_object, hash);
		kref_get(&base->refcount);
	}
	read_unlock(&tdev->object_lock);

	if (unlikely(ret != 0))
		return NULL;

	if (tfile != base->tfile && !base->shareable) {
		printk(KERN_ERR TTM_PFX
		       "Attempted access of non-shareable object.\n");
		ttm_base_object_unref(&base);
		return NULL;
	}

	return base;
}
EXPORT_SYMBOL(ttm_base_object_lookup);
244 | |||
/*
 * ttm_ref_object_add - add (or reuse) a ref object of @ref_type from
 * @tfile to @base.
 *
 * @existed: if non-NULL, set to false when a new ref object was
 * created, true when an existing one was reused.
 *
 * Optimistic-retry scheme: look up under the read lock first (common
 * case: ref already exists, just kref_get it). Otherwise allocate a
 * new ref object outside any lock and try to insert it under the write
 * lock; if another thread raced us and inserted first, drm_ht_insert_item
 * returns -EINVAL, we free our copy and loop to find theirs.
 *
 * Returns 0 on success, or a negative error from memory accounting /
 * allocation.
 */
int ttm_ref_object_add(struct ttm_object_file *tfile,
		       struct ttm_base_object *base,
		       enum ttm_ref_type ref_type, bool *existed)
{
	struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
	struct ttm_ref_object *ref;
	struct drm_hash_item *hash;
	struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
	int ret = -EINVAL;

	if (existed != NULL)
		*existed = true;

	while (ret == -EINVAL) {
		read_lock(&tfile->lock);
		ret = drm_ht_find_item(ht, base->hash.key, &hash);

		if (ret == 0) {
			/* Ref already exists: just bump its count. */
			ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
			kref_get(&ref->kref);
			read_unlock(&tfile->lock);
			break;
		}

		read_unlock(&tfile->lock);
		/* Account and allocate outside the lock; may sleep. */
		ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
					   false, false);
		if (unlikely(ret != 0))
			return ret;
		ref = kmalloc(sizeof(*ref), GFP_KERNEL);
		if (unlikely(ref == NULL)) {
			ttm_mem_global_free(mem_glob, sizeof(*ref));
			return -ENOMEM;
		}

		ref->hash.key = base->hash.key;
		ref->obj = base;
		ref->tfile = tfile;
		ref->ref_type = ref_type;
		kref_init(&ref->kref);

		write_lock(&tfile->lock);
		ret = drm_ht_insert_item(ht, &ref->hash);

		if (likely(ret == 0)) {
			list_add_tail(&ref->head, &tfile->ref_list);
			/* The ref object holds a ref on the base object. */
			kref_get(&base->refcount);
			write_unlock(&tfile->lock);
			if (existed != NULL)
				*existed = false;
			break;
		}

		/* Raced with a concurrent insert: undo and retry. */
		write_unlock(&tfile->lock);
		BUG_ON(ret != -EINVAL);

		ttm_mem_global_free(mem_glob, sizeof(*ref));
		kfree(ref);
	}

	return ret;
}
EXPORT_SYMBOL(ttm_ref_object_add);
308 | |||
/*
 * ttm_ref_object_release - kref release callback for ref objects.
 *
 * Called with tfile->lock write-held. The ref is unhashed and unlisted
 * first, then the lock is dropped around the (possibly sleeping)
 * driver callback, base-object unref and kfree, and retaken for the
 * caller - mirroring ttm_release_base().
 */
static void ttm_ref_object_release(struct kref *kref)
{
	struct ttm_ref_object *ref =
		container_of(kref, struct ttm_ref_object, kref);
	struct ttm_base_object *base = ref->obj;
	struct ttm_object_file *tfile = ref->tfile;
	struct drm_open_hash *ht;
	struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;

	ht = &tfile->ref_hash[ref->ref_type];
	(void)drm_ht_remove_item(ht, &ref->hash);
	list_del(&ref->head);
	write_unlock(&tfile->lock);

	if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
		base->ref_obj_release(base, ref->ref_type);

	ttm_base_object_unref(&ref->obj);
	ttm_mem_global_free(mem_glob, sizeof(*ref));
	kfree(ref);
	write_lock(&tfile->lock);
}
331 | |||
/*
 * ttm_ref_object_base_unref - drop one @ref_type reference from @tfile
 * on the base object named @key.
 *
 * Returns 0 on success or -EINVAL if no such ref object exists.
 */
int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
			      unsigned long key, enum ttm_ref_type ref_type)
{
	struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
	struct ttm_ref_object *ref;
	struct drm_hash_item *hash;
	int ret;

	write_lock(&tfile->lock);
	ret = drm_ht_find_item(ht, key, &hash);
	if (unlikely(ret != 0)) {
		write_unlock(&tfile->lock);
		return -EINVAL;
	}
	ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
	/* May drop and retake tfile->lock in ttm_ref_object_release(). */
	kref_put(&ref->kref, ttm_ref_object_release);
	write_unlock(&tfile->lock);
	return 0;
}
EXPORT_SYMBOL(ttm_ref_object_base_unref);
352 | |||
/*
 * ttm_object_file_release - tear down an object file on client close.
 *
 * Forcibly releases every remaining ref object (and thereby any base
 * objects they were keeping alive), removes the per-type hash tables
 * and drops the caller's reference on the file.
 */
void ttm_object_file_release(struct ttm_object_file **p_tfile)
{
	struct ttm_ref_object *ref;
	struct list_head *list;
	unsigned int i;
	struct ttm_object_file *tfile = *p_tfile;

	*p_tfile = NULL;
	write_lock(&tfile->lock);

	/*
	 * Since we release the lock within the loop, we have to
	 * restart it from the beginning each time.
	 */

	while (!list_empty(&tfile->ref_list)) {
		list = tfile->ref_list.next;
		ref = list_entry(list, struct ttm_ref_object, head);
		/* Unconditional release, regardless of remaining kref count. */
		ttm_ref_object_release(&ref->kref);
	}

	for (i = 0; i < TTM_REF_NUM; ++i)
		drm_ht_remove(&tfile->ref_hash[i]);

	write_unlock(&tfile->lock);
	ttm_object_file_unref(&tfile);
}
EXPORT_SYMBOL(ttm_object_file_release);
381 | |||
382 | struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev, | ||
383 | unsigned int hash_order) | ||
384 | { | ||
385 | struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL); | ||
386 | unsigned int i; | ||
387 | unsigned int j = 0; | ||
388 | int ret; | ||
389 | |||
390 | if (unlikely(tfile == NULL)) | ||
391 | return NULL; | ||
392 | |||
393 | rwlock_init(&tfile->lock); | ||
394 | tfile->tdev = tdev; | ||
395 | kref_init(&tfile->refcount); | ||
396 | INIT_LIST_HEAD(&tfile->ref_list); | ||
397 | |||
398 | for (i = 0; i < TTM_REF_NUM; ++i) { | ||
399 | ret = drm_ht_create(&tfile->ref_hash[i], hash_order); | ||
400 | if (ret) { | ||
401 | j = i; | ||
402 | goto out_err; | ||
403 | } | ||
404 | } | ||
405 | |||
406 | return tfile; | ||
407 | out_err: | ||
408 | for (i = 0; i < j; ++i) | ||
409 | drm_ht_remove(&tfile->ref_hash[i]); | ||
410 | |||
411 | kfree(tfile); | ||
412 | |||
413 | return NULL; | ||
414 | } | ||
415 | EXPORT_SYMBOL(ttm_object_file_init); | ||
416 | |||
417 | struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global | ||
418 | *mem_glob, | ||
419 | unsigned int hash_order) | ||
420 | { | ||
421 | struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL); | ||
422 | int ret; | ||
423 | |||
424 | if (unlikely(tdev == NULL)) | ||
425 | return NULL; | ||
426 | |||
427 | tdev->mem_glob = mem_glob; | ||
428 | rwlock_init(&tdev->object_lock); | ||
429 | atomic_set(&tdev->object_count, 0); | ||
430 | ret = drm_ht_create(&tdev->object_hash, hash_order); | ||
431 | |||
432 | if (likely(ret == 0)) | ||
433 | return tdev; | ||
434 | |||
435 | kfree(tdev); | ||
436 | return NULL; | ||
437 | } | ||
438 | EXPORT_SYMBOL(ttm_object_device_init); | ||
439 | |||
/*
 * ttm_object_device_release - tear down the per-device object state.
 *
 * All object files (and thus all base objects) must be gone by now;
 * the hash table is removed under the lock and the device freed.
 */
void ttm_object_device_release(struct ttm_object_device **p_tdev)
{
	struct ttm_object_device *tdev = *p_tdev;

	*p_tdev = NULL;

	write_lock(&tdev->object_lock);
	drm_ht_remove(&tdev->object_hash);
	write_unlock(&tdev->object_lock);

	kfree(tdev);
}
EXPORT_SYMBOL(ttm_object_device_release);
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index 7bcb89f39ce8..9c2b1cc5dba5 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c | |||
@@ -192,6 +192,7 @@ int ttm_tt_populate(struct ttm_tt *ttm) | |||
192 | ttm->state = tt_unbound; | 192 | ttm->state = tt_unbound; |
193 | return 0; | 193 | return 0; |
194 | } | 194 | } |
195 | EXPORT_SYMBOL(ttm_tt_populate); | ||
195 | 196 | ||
196 | #ifdef CONFIG_X86 | 197 | #ifdef CONFIG_X86 |
197 | static inline int ttm_tt_set_page_caching(struct page *p, | 198 | static inline int ttm_tt_set_page_caching(struct page *p, |