diff options
author | Dave Airlie <airlied@redhat.com> | 2009-12-07 16:03:55 -0500 |
---|---|---|
committer | Dave Airlie <airlied@redhat.com> | 2009-12-07 16:03:55 -0500 |
commit | f84676185368e36c6bc0eeab87ab73ed39042648 (patch) | |
tree | 7d97885644c7b6a09c244de11af94409da665e62 /drivers/gpu/drm | |
parent | 22dd50133ab7548adb23e86c302d6e8b75817e8c (diff) | |
parent | 447aeb907e417e0e837b4a4026d5081c88b6e8ca (diff) |
Merge remote branch 'origin/drm-core-next' into test
Conflicts:
drivers/gpu/drm/drm_fb_helper.c
Diffstat (limited to 'drivers/gpu/drm')
25 files changed, 1648 insertions, 371 deletions
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 3c8827a7aabd..91567ac806f1 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile | |||
@@ -15,7 +15,7 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \ | |||
15 | 15 | ||
16 | drm-$(CONFIG_COMPAT) += drm_ioc32.o | 16 | drm-$(CONFIG_COMPAT) += drm_ioc32.o |
17 | 17 | ||
18 | drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o | 18 | drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_i2c_helper.o |
19 | 19 | ||
20 | obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o | 20 | obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o |
21 | 21 | ||
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 5cae0b3eee9b..4fe321dc900c 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c | |||
@@ -125,6 +125,15 @@ static struct drm_prop_enum_list drm_tv_subconnector_enum_list[] = | |||
125 | DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name, | 125 | DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name, |
126 | drm_tv_subconnector_enum_list) | 126 | drm_tv_subconnector_enum_list) |
127 | 127 | ||
128 | static struct drm_prop_enum_list drm_dirty_info_enum_list[] = { | ||
129 | { DRM_MODE_DIRTY_OFF, "Off" }, | ||
130 | { DRM_MODE_DIRTY_ON, "On" }, | ||
131 | { DRM_MODE_DIRTY_ANNOTATE, "Annotate" }, | ||
132 | }; | ||
133 | |||
134 | DRM_ENUM_NAME_FN(drm_get_dirty_info_name, | ||
135 | drm_dirty_info_enum_list) | ||
136 | |||
128 | struct drm_conn_prop_enum_list { | 137 | struct drm_conn_prop_enum_list { |
129 | int type; | 138 | int type; |
130 | char *name; | 139 | char *name; |
@@ -802,6 +811,36 @@ int drm_mode_create_dithering_property(struct drm_device *dev) | |||
802 | EXPORT_SYMBOL(drm_mode_create_dithering_property); | 811 | EXPORT_SYMBOL(drm_mode_create_dithering_property); |
803 | 812 | ||
804 | /** | 813 | /** |
814 | * drm_mode_create_dirty_property - create dirty property | ||
815 | * @dev: DRM device | ||
816 | * | ||
817 | * Called by a driver the first time it's needed, must be attached to desired | ||
818 | * connectors. | ||
819 | */ | ||
820 | int drm_mode_create_dirty_info_property(struct drm_device *dev) | ||
821 | { | ||
822 | struct drm_property *dirty_info; | ||
823 | int i; | ||
824 | |||
825 | if (dev->mode_config.dirty_info_property) | ||
826 | return 0; | ||
827 | |||
828 | dirty_info = | ||
829 | drm_property_create(dev, DRM_MODE_PROP_ENUM | | ||
830 | DRM_MODE_PROP_IMMUTABLE, | ||
831 | "dirty", | ||
832 | ARRAY_SIZE(drm_dirty_info_enum_list)); | ||
833 | for (i = 0; i < ARRAY_SIZE(drm_dirty_info_enum_list); i++) | ||
834 | drm_property_add_enum(dirty_info, i, | ||
835 | drm_dirty_info_enum_list[i].type, | ||
836 | drm_dirty_info_enum_list[i].name); | ||
837 | dev->mode_config.dirty_info_property = dirty_info; | ||
838 | |||
839 | return 0; | ||
840 | } | ||
841 | EXPORT_SYMBOL(drm_mode_create_dirty_info_property); | ||
842 | |||
843 | /** | ||
805 | * drm_mode_config_init - initialize DRM mode_configuration structure | 844 | * drm_mode_config_init - initialize DRM mode_configuration structure |
806 | * @dev: DRM device | 845 | * @dev: DRM device |
807 | * | 846 | * |
@@ -1753,6 +1792,71 @@ out: | |||
1753 | return ret; | 1792 | return ret; |
1754 | } | 1793 | } |
1755 | 1794 | ||
1795 | int drm_mode_dirtyfb_ioctl(struct drm_device *dev, | ||
1796 | void *data, struct drm_file *file_priv) | ||
1797 | { | ||
1798 | struct drm_clip_rect __user *clips_ptr; | ||
1799 | struct drm_clip_rect *clips = NULL; | ||
1800 | struct drm_mode_fb_dirty_cmd *r = data; | ||
1801 | struct drm_mode_object *obj; | ||
1802 | struct drm_framebuffer *fb; | ||
1803 | unsigned flags; | ||
1804 | int num_clips; | ||
1805 | int ret = 0; | ||
1806 | |||
1807 | mutex_lock(&dev->mode_config.mutex); | ||
1808 | obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB); | ||
1809 | if (!obj) { | ||
1810 | DRM_ERROR("invalid framebuffer id\n"); | ||
1811 | ret = -EINVAL; | ||
1812 | goto out_err1; | ||
1813 | } | ||
1814 | fb = obj_to_fb(obj); | ||
1815 | |||
1816 | num_clips = r->num_clips; | ||
1817 | clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr; | ||
1818 | |||
1819 | if (!num_clips != !clips_ptr) { | ||
1820 | ret = -EINVAL; | ||
1821 | goto out_err1; | ||
1822 | } | ||
1823 | |||
1824 | flags = DRM_MODE_FB_DIRTY_FLAGS & r->flags; | ||
1825 | |||
1826 | /* If userspace annotates copy, clips must come in pairs */ | ||
1827 | if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY && (num_clips % 2)) { | ||
1828 | ret = -EINVAL; | ||
1829 | goto out_err1; | ||
1830 | } | ||
1831 | |||
1832 | if (num_clips && clips_ptr) { | ||
1833 | clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL); | ||
1834 | if (!clips) { | ||
1835 | ret = -ENOMEM; | ||
1836 | goto out_err1; | ||
1837 | } | ||
1838 | |||
1839 | ret = copy_from_user(clips, clips_ptr, | ||
1840 | num_clips * sizeof(*clips)); | ||
1841 | if (ret) | ||
1842 | goto out_err2; | ||
1843 | } | ||
1844 | |||
1845 | if (fb->funcs->dirty) { | ||
1846 | ret = fb->funcs->dirty(fb, flags, r->color, clips, num_clips); | ||
1847 | } else { | ||
1848 | ret = -ENOSYS; | ||
1849 | goto out_err2; | ||
1850 | } | ||
1851 | |||
1852 | out_err2: | ||
1853 | kfree(clips); | ||
1854 | out_err1: | ||
1855 | mutex_unlock(&dev->mode_config.mutex); | ||
1856 | return ret; | ||
1857 | } | ||
1858 | |||
1859 | |||
1756 | /** | 1860 | /** |
1757 | * drm_fb_release - remove and free the FBs on this file | 1861 | * drm_fb_release - remove and free the FBs on this file |
1758 | * @filp: file * from the ioctl | 1862 | * @filp: file * from the ioctl |
@@ -2478,3 +2582,72 @@ out: | |||
2478 | mutex_unlock(&dev->mode_config.mutex); | 2582 | mutex_unlock(&dev->mode_config.mutex); |
2479 | return ret; | 2583 | return ret; |
2480 | } | 2584 | } |
2585 | |||
2586 | int drm_mode_page_flip_ioctl(struct drm_device *dev, | ||
2587 | void *data, struct drm_file *file_priv) | ||
2588 | { | ||
2589 | struct drm_mode_crtc_page_flip *page_flip = data; | ||
2590 | struct drm_mode_object *obj; | ||
2591 | struct drm_crtc *crtc; | ||
2592 | struct drm_framebuffer *fb; | ||
2593 | struct drm_pending_vblank_event *e = NULL; | ||
2594 | unsigned long flags; | ||
2595 | int ret = -EINVAL; | ||
2596 | |||
2597 | if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS || | ||
2598 | page_flip->reserved != 0) | ||
2599 | return -EINVAL; | ||
2600 | |||
2601 | mutex_lock(&dev->mode_config.mutex); | ||
2602 | obj = drm_mode_object_find(dev, page_flip->crtc_id, DRM_MODE_OBJECT_CRTC); | ||
2603 | if (!obj) | ||
2604 | goto out; | ||
2605 | crtc = obj_to_crtc(obj); | ||
2606 | |||
2607 | if (crtc->funcs->page_flip == NULL) | ||
2608 | goto out; | ||
2609 | |||
2610 | obj = drm_mode_object_find(dev, page_flip->fb_id, DRM_MODE_OBJECT_FB); | ||
2611 | if (!obj) | ||
2612 | goto out; | ||
2613 | fb = obj_to_fb(obj); | ||
2614 | |||
2615 | if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) { | ||
2616 | ret = -ENOMEM; | ||
2617 | spin_lock_irqsave(&dev->event_lock, flags); | ||
2618 | if (file_priv->event_space < sizeof e->event) { | ||
2619 | spin_unlock_irqrestore(&dev->event_lock, flags); | ||
2620 | goto out; | ||
2621 | } | ||
2622 | file_priv->event_space -= sizeof e->event; | ||
2623 | spin_unlock_irqrestore(&dev->event_lock, flags); | ||
2624 | |||
2625 | e = kzalloc(sizeof *e, GFP_KERNEL); | ||
2626 | if (e == NULL) { | ||
2627 | spin_lock_irqsave(&dev->event_lock, flags); | ||
2628 | file_priv->event_space += sizeof e->event; | ||
2629 | spin_unlock_irqrestore(&dev->event_lock, flags); | ||
2630 | goto out; | ||
2631 | } | ||
2632 | |||
2633 | e->event.base.type = DRM_EVENT_VBLANK; | ||
2634 | e->event.base.length = sizeof e->event; | ||
2635 | e->event.user_data = page_flip->user_data; | ||
2636 | e->base.event = &e->event.base; | ||
2637 | e->base.file_priv = file_priv; | ||
2638 | e->base.destroy = | ||
2639 | (void (*) (struct drm_pending_event *)) kfree; | ||
2640 | } | ||
2641 | |||
2642 | ret = crtc->funcs->page_flip(crtc, fb, e); | ||
2643 | if (ret) { | ||
2644 | spin_lock_irqsave(&dev->event_lock, flags); | ||
2645 | file_priv->event_space += sizeof e->event; | ||
2646 | spin_unlock_irqrestore(&dev->event_lock, flags); | ||
2647 | kfree(e); | ||
2648 | } | ||
2649 | |||
2650 | out: | ||
2651 | mutex_unlock(&dev->mode_config.mutex); | ||
2652 | return ret; | ||
2653 | } | ||
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index bbfd110a7168..3963b3c1081a 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c | |||
@@ -109,7 +109,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector, | |||
109 | 109 | ||
110 | count = (*connector_funcs->get_modes)(connector); | 110 | count = (*connector_funcs->get_modes)(connector); |
111 | if (!count) { | 111 | if (!count) { |
112 | count = drm_add_modes_noedid(connector, 800, 600); | 112 | count = drm_add_modes_noedid(connector, 1024, 768); |
113 | if (!count) | 113 | if (!count) |
114 | return 0; | 114 | return 0; |
115 | } | 115 | } |
diff --git a/drivers/gpu/drm/i915/intel_dp_i2c.c b/drivers/gpu/drm/drm_dp_i2c_helper.c index a63b6f57d2d4..f1c7c856e9db 100644 --- a/drivers/gpu/drm/i915/intel_dp_i2c.c +++ b/drivers/gpu/drm/drm_dp_i2c_helper.c | |||
@@ -28,84 +28,20 @@ | |||
28 | #include <linux/errno.h> | 28 | #include <linux/errno.h> |
29 | #include <linux/sched.h> | 29 | #include <linux/sched.h> |
30 | #include <linux/i2c.h> | 30 | #include <linux/i2c.h> |
31 | #include "intel_dp.h" | 31 | #include "drm_dp_helper.h" |
32 | #include "drmP.h" | 32 | #include "drmP.h" |
33 | 33 | ||
34 | /* Run a single AUX_CH I2C transaction, writing/reading data as necessary */ | 34 | /* Run a single AUX_CH I2C transaction, writing/reading data as necessary */ |
35 | |||
36 | #define MODE_I2C_START 1 | ||
37 | #define MODE_I2C_WRITE 2 | ||
38 | #define MODE_I2C_READ 4 | ||
39 | #define MODE_I2C_STOP 8 | ||
40 | |||
41 | static int | 35 | static int |
42 | i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode, | 36 | i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode, |
43 | uint8_t write_byte, uint8_t *read_byte) | 37 | uint8_t write_byte, uint8_t *read_byte) |
44 | { | 38 | { |
45 | struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; | 39 | struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; |
46 | uint16_t address = algo_data->address; | ||
47 | uint8_t msg[5]; | ||
48 | uint8_t reply[2]; | ||
49 | int msg_bytes; | ||
50 | int reply_bytes; | ||
51 | int ret; | 40 | int ret; |
52 | 41 | ||
53 | /* Set up the command byte */ | 42 | ret = (*algo_data->aux_ch)(adapter, mode, |
54 | if (mode & MODE_I2C_READ) | 43 | write_byte, read_byte); |
55 | msg[0] = AUX_I2C_READ << 4; | 44 | return ret; |
56 | else | ||
57 | msg[0] = AUX_I2C_WRITE << 4; | ||
58 | |||
59 | if (!(mode & MODE_I2C_STOP)) | ||
60 | msg[0] |= AUX_I2C_MOT << 4; | ||
61 | |||
62 | msg[1] = address >> 8; | ||
63 | msg[2] = address; | ||
64 | |||
65 | switch (mode) { | ||
66 | case MODE_I2C_WRITE: | ||
67 | msg[3] = 0; | ||
68 | msg[4] = write_byte; | ||
69 | msg_bytes = 5; | ||
70 | reply_bytes = 1; | ||
71 | break; | ||
72 | case MODE_I2C_READ: | ||
73 | msg[3] = 0; | ||
74 | msg_bytes = 4; | ||
75 | reply_bytes = 2; | ||
76 | break; | ||
77 | default: | ||
78 | msg_bytes = 3; | ||
79 | reply_bytes = 1; | ||
80 | break; | ||
81 | } | ||
82 | |||
83 | for (;;) { | ||
84 | ret = (*algo_data->aux_ch)(adapter, | ||
85 | msg, msg_bytes, | ||
86 | reply, reply_bytes); | ||
87 | if (ret < 0) { | ||
88 | DRM_DEBUG("aux_ch failed %d\n", ret); | ||
89 | return ret; | ||
90 | } | ||
91 | switch (reply[0] & AUX_I2C_REPLY_MASK) { | ||
92 | case AUX_I2C_REPLY_ACK: | ||
93 | if (mode == MODE_I2C_READ) { | ||
94 | *read_byte = reply[1]; | ||
95 | } | ||
96 | return reply_bytes - 1; | ||
97 | case AUX_I2C_REPLY_NACK: | ||
98 | DRM_DEBUG("aux_ch nack\n"); | ||
99 | return -EREMOTEIO; | ||
100 | case AUX_I2C_REPLY_DEFER: | ||
101 | DRM_DEBUG("aux_ch defer\n"); | ||
102 | udelay(100); | ||
103 | break; | ||
104 | default: | ||
105 | DRM_ERROR("aux_ch invalid reply 0x%02x\n", reply[0]); | ||
106 | return -EREMOTEIO; | ||
107 | } | ||
108 | } | ||
109 | } | 45 | } |
110 | 46 | ||
111 | /* | 47 | /* |
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index a75ca63deea6..ff2f1042cb44 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c | |||
@@ -145,6 +145,8 @@ static struct drm_ioctl_desc drm_ioctls[] = { | |||
145 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW), | 145 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW), |
146 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW), | 146 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW), |
147 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW), | 147 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW), |
148 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), | ||
149 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW) | ||
148 | }; | 150 | }; |
149 | 151 | ||
150 | #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) | 152 | #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) |
@@ -366,6 +368,29 @@ module_init(drm_core_init); | |||
366 | module_exit(drm_core_exit); | 368 | module_exit(drm_core_exit); |
367 | 369 | ||
368 | /** | 370 | /** |
371 | * Copy and IOCTL return string to user space | ||
372 | */ | ||
373 | static int drm_copy_field(char *buf, size_t *buf_len, const char *value) | ||
374 | { | ||
375 | int len; | ||
376 | |||
377 | /* don't overflow userbuf */ | ||
378 | len = strlen(value); | ||
379 | if (len > *buf_len) | ||
380 | len = *buf_len; | ||
381 | |||
382 | /* let userspace know exact length of driver value (which could be | ||
383 | * larger than the userspace-supplied buffer) */ | ||
384 | *buf_len = strlen(value); | ||
385 | |||
386 | /* finally, try filling in the userbuf */ | ||
387 | if (len && buf) | ||
388 | if (copy_to_user(buf, value, len)) | ||
389 | return -EFAULT; | ||
390 | return 0; | ||
391 | } | ||
392 | |||
393 | /** | ||
369 | * Get version information | 394 | * Get version information |
370 | * | 395 | * |
371 | * \param inode device inode. | 396 | * \param inode device inode. |
@@ -380,16 +405,21 @@ static int drm_version(struct drm_device *dev, void *data, | |||
380 | struct drm_file *file_priv) | 405 | struct drm_file *file_priv) |
381 | { | 406 | { |
382 | struct drm_version *version = data; | 407 | struct drm_version *version = data; |
383 | int len; | 408 | int err; |
384 | 409 | ||
385 | version->version_major = dev->driver->major; | 410 | version->version_major = dev->driver->major; |
386 | version->version_minor = dev->driver->minor; | 411 | version->version_minor = dev->driver->minor; |
387 | version->version_patchlevel = dev->driver->patchlevel; | 412 | version->version_patchlevel = dev->driver->patchlevel; |
388 | DRM_COPY(version->name, dev->driver->name); | 413 | err = drm_copy_field(version->name, &version->name_len, |
389 | DRM_COPY(version->date, dev->driver->date); | 414 | dev->driver->name); |
390 | DRM_COPY(version->desc, dev->driver->desc); | 415 | if (!err) |
391 | 416 | err = drm_copy_field(version->date, &version->date_len, | |
392 | return 0; | 417 | dev->driver->date); |
418 | if (!err) | ||
419 | err = drm_copy_field(version->desc, &version->desc_len, | ||
420 | dev->driver->desc); | ||
421 | |||
422 | return err; | ||
393 | } | 423 | } |
394 | 424 | ||
395 | /** | 425 | /** |
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index b54ba63d506e..c39b26f1abed 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c | |||
@@ -123,18 +123,20 @@ static const u8 edid_header[] = { | |||
123 | */ | 123 | */ |
124 | static bool edid_is_valid(struct edid *edid) | 124 | static bool edid_is_valid(struct edid *edid) |
125 | { | 125 | { |
126 | int i; | 126 | int i, score = 0; |
127 | u8 csum = 0; | 127 | u8 csum = 0; |
128 | u8 *raw_edid = (u8 *)edid; | 128 | u8 *raw_edid = (u8 *)edid; |
129 | 129 | ||
130 | if (memcmp(edid->header, edid_header, sizeof(edid_header))) | 130 | for (i = 0; i < sizeof(edid_header); i++) |
131 | goto bad; | 131 | if (raw_edid[i] == edid_header[i]) |
132 | if (edid->version != 1) { | 132 | score++; |
133 | DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version); | 133 | |
134 | if (score == 8) ; | ||
135 | else if (score >= 6) { | ||
136 | DRM_DEBUG("Fixing EDID header, your hardware may be failing\n"); | ||
137 | memcpy(raw_edid, edid_header, sizeof(edid_header)); | ||
138 | } else | ||
134 | goto bad; | 139 | goto bad; |
135 | } | ||
136 | if (edid->revision > 4) | ||
137 | DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n"); | ||
138 | 140 | ||
139 | for (i = 0; i < EDID_LENGTH; i++) | 141 | for (i = 0; i < EDID_LENGTH; i++) |
140 | csum += raw_edid[i]; | 142 | csum += raw_edid[i]; |
@@ -143,6 +145,14 @@ static bool edid_is_valid(struct edid *edid) | |||
143 | goto bad; | 145 | goto bad; |
144 | } | 146 | } |
145 | 147 | ||
148 | if (edid->version != 1) { | ||
149 | DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version); | ||
150 | goto bad; | ||
151 | } | ||
152 | |||
153 | if (edid->revision > 4) | ||
154 | DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n"); | ||
155 | |||
146 | return 1; | 156 | return 1; |
147 | 157 | ||
148 | bad: | 158 | bad: |
@@ -481,16 +491,17 @@ static struct drm_display_mode drm_dmt_modes[] = { | |||
481 | 3048, 3536, 0, 1600, 1603, 1609, 1682, 0, | 491 | 3048, 3536, 0, 1600, 1603, 1609, 1682, 0, |
482 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, | 492 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
483 | }; | 493 | }; |
494 | static const int drm_num_dmt_modes = | ||
495 | sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode); | ||
484 | 496 | ||
485 | static struct drm_display_mode *drm_find_dmt(struct drm_device *dev, | 497 | static struct drm_display_mode *drm_find_dmt(struct drm_device *dev, |
486 | int hsize, int vsize, int fresh) | 498 | int hsize, int vsize, int fresh) |
487 | { | 499 | { |
488 | int i, count; | 500 | int i; |
489 | struct drm_display_mode *ptr, *mode; | 501 | struct drm_display_mode *ptr, *mode; |
490 | 502 | ||
491 | count = sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode); | ||
492 | mode = NULL; | 503 | mode = NULL; |
493 | for (i = 0; i < count; i++) { | 504 | for (i = 0; i < drm_num_dmt_modes; i++) { |
494 | ptr = &drm_dmt_modes[i]; | 505 | ptr = &drm_dmt_modes[i]; |
495 | if (hsize == ptr->hdisplay && | 506 | if (hsize == ptr->hdisplay && |
496 | vsize == ptr->vdisplay && | 507 | vsize == ptr->vdisplay && |
@@ -834,8 +845,165 @@ static int add_standard_modes(struct drm_connector *connector, struct edid *edid | |||
834 | return modes; | 845 | return modes; |
835 | } | 846 | } |
836 | 847 | ||
848 | /* | ||
849 | * XXX fix this for: | ||
850 | * - GTF secondary curve formula | ||
851 | * - EDID 1.4 range offsets | ||
852 | * - CVT extended bits | ||
853 | */ | ||
854 | static bool | ||
855 | mode_in_range(struct drm_display_mode *mode, struct detailed_timing *timing) | ||
856 | { | ||
857 | struct detailed_data_monitor_range *range; | ||
858 | int hsync, vrefresh; | ||
859 | |||
860 | range = &timing->data.other_data.data.range; | ||
861 | |||
862 | hsync = drm_mode_hsync(mode); | ||
863 | vrefresh = drm_mode_vrefresh(mode); | ||
864 | |||
865 | if (hsync < range->min_hfreq_khz || hsync > range->max_hfreq_khz) | ||
866 | return false; | ||
867 | |||
868 | if (vrefresh < range->min_vfreq || vrefresh > range->max_vfreq) | ||
869 | return false; | ||
870 | |||
871 | if (range->pixel_clock_mhz && range->pixel_clock_mhz != 0xff) { | ||
872 | /* be forgiving since it's in units of 10MHz */ | ||
873 | int max_clock = range->pixel_clock_mhz * 10 + 9; | ||
874 | max_clock *= 1000; | ||
875 | if (mode->clock > max_clock) | ||
876 | return false; | ||
877 | } | ||
878 | |||
879 | return true; | ||
880 | } | ||
881 | |||
882 | /* | ||
883 | * XXX If drm_dmt_modes ever regrows the CVT-R modes (and it will) this will | ||
884 | * need to account for them. | ||
885 | */ | ||
886 | static int drm_gtf_modes_for_range(struct drm_connector *connector, | ||
887 | struct detailed_timing *timing) | ||
888 | { | ||
889 | int i, modes = 0; | ||
890 | struct drm_display_mode *newmode; | ||
891 | struct drm_device *dev = connector->dev; | ||
892 | |||
893 | for (i = 0; i < drm_num_dmt_modes; i++) { | ||
894 | if (mode_in_range(drm_dmt_modes + i, timing)) { | ||
895 | newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]); | ||
896 | if (newmode) { | ||
897 | drm_mode_probed_add(connector, newmode); | ||
898 | modes++; | ||
899 | } | ||
900 | } | ||
901 | } | ||
902 | |||
903 | return modes; | ||
904 | } | ||
905 | |||
906 | static int drm_cvt_modes(struct drm_connector *connector, | ||
907 | struct detailed_timing *timing) | ||
908 | { | ||
909 | int i, j, modes = 0; | ||
910 | struct drm_display_mode *newmode; | ||
911 | struct drm_device *dev = connector->dev; | ||
912 | struct cvt_timing *cvt; | ||
913 | const int rates[] = { 60, 85, 75, 60, 50 }; | ||
914 | |||
915 | for (i = 0; i < 4; i++) { | ||
916 | int width, height; | ||
917 | cvt = &(timing->data.other_data.data.cvt[i]); | ||
918 | |||
919 | height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 8) + 1) * 2; | ||
920 | switch (cvt->code[1] & 0xc0) { | ||
921 | case 0x00: | ||
922 | width = height * 4 / 3; | ||
923 | break; | ||
924 | case 0x40: | ||
925 | width = height * 16 / 9; | ||
926 | break; | ||
927 | case 0x80: | ||
928 | width = height * 16 / 10; | ||
929 | break; | ||
930 | case 0xc0: | ||
931 | width = height * 15 / 9; | ||
932 | break; | ||
933 | } | ||
934 | |||
935 | for (j = 1; j < 5; j++) { | ||
936 | if (cvt->code[2] & (1 << j)) { | ||
937 | newmode = drm_cvt_mode(dev, width, height, | ||
938 | rates[j], j == 0, | ||
939 | false, false); | ||
940 | if (newmode) { | ||
941 | drm_mode_probed_add(connector, newmode); | ||
942 | modes++; | ||
943 | } | ||
944 | } | ||
945 | } | ||
946 | } | ||
947 | |||
948 | return modes; | ||
949 | } | ||
950 | |||
951 | static int add_detailed_modes(struct drm_connector *connector, | ||
952 | struct detailed_timing *timing, | ||
953 | struct edid *edid, u32 quirks, int preferred) | ||
954 | { | ||
955 | int i, modes = 0; | ||
956 | struct detailed_non_pixel *data = &timing->data.other_data; | ||
957 | int timing_level = standard_timing_level(edid); | ||
958 | int gtf = (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF); | ||
959 | struct drm_display_mode *newmode; | ||
960 | struct drm_device *dev = connector->dev; | ||
961 | |||
962 | if (timing->pixel_clock) { | ||
963 | newmode = drm_mode_detailed(dev, edid, timing, quirks); | ||
964 | if (!newmode) | ||
965 | return 0; | ||
966 | |||
967 | if (preferred) | ||
968 | newmode->type |= DRM_MODE_TYPE_PREFERRED; | ||
969 | |||
970 | drm_mode_probed_add(connector, newmode); | ||
971 | return 1; | ||
972 | } | ||
973 | |||
974 | /* other timing types */ | ||
975 | switch (data->type) { | ||
976 | case EDID_DETAIL_MONITOR_RANGE: | ||
977 | if (gtf) | ||
978 | modes += drm_gtf_modes_for_range(connector, timing); | ||
979 | break; | ||
980 | case EDID_DETAIL_STD_MODES: | ||
981 | /* Six modes per detailed section */ | ||
982 | for (i = 0; i < 6; i++) { | ||
983 | struct std_timing *std; | ||
984 | struct drm_display_mode *newmode; | ||
985 | |||
986 | std = &data->data.timings[i]; | ||
987 | newmode = drm_mode_std(dev, std, edid->revision, | ||
988 | timing_level); | ||
989 | if (newmode) { | ||
990 | drm_mode_probed_add(connector, newmode); | ||
991 | modes++; | ||
992 | } | ||
993 | } | ||
994 | break; | ||
995 | case EDID_DETAIL_CVT_3BYTE: | ||
996 | modes += drm_cvt_modes(connector, timing); | ||
997 | break; | ||
998 | default: | ||
999 | break; | ||
1000 | } | ||
1001 | |||
1002 | return modes; | ||
1003 | } | ||
1004 | |||
837 | /** | 1005 | /** |
838 | * add_detailed_modes - get detailed mode info from EDID data | 1006 | * add_detailed_info - get detailed mode info from EDID data |
839 | * @connector: attached connector | 1007 | * @connector: attached connector |
840 | * @edid: EDID block to scan | 1008 | * @edid: EDID block to scan |
841 | * @quirks: quirks to apply | 1009 | * @quirks: quirks to apply |
@@ -846,67 +1014,24 @@ static int add_standard_modes(struct drm_connector *connector, struct edid *edid | |||
846 | static int add_detailed_info(struct drm_connector *connector, | 1014 | static int add_detailed_info(struct drm_connector *connector, |
847 | struct edid *edid, u32 quirks) | 1015 | struct edid *edid, u32 quirks) |
848 | { | 1016 | { |
849 | struct drm_device *dev = connector->dev; | 1017 | int i, modes = 0; |
850 | int i, j, modes = 0; | ||
851 | int timing_level; | ||
852 | |||
853 | timing_level = standard_timing_level(edid); | ||
854 | 1018 | ||
855 | for (i = 0; i < EDID_DETAILED_TIMINGS; i++) { | 1019 | for (i = 0; i < EDID_DETAILED_TIMINGS; i++) { |
856 | struct detailed_timing *timing = &edid->detailed_timings[i]; | 1020 | struct detailed_timing *timing = &edid->detailed_timings[i]; |
857 | struct detailed_non_pixel *data = &timing->data.other_data; | 1021 | int preferred = (i == 0) && (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING); |
858 | struct drm_display_mode *newmode; | ||
859 | |||
860 | /* X server check is version 1.1 or higher */ | ||
861 | if (edid->version == 1 && edid->revision >= 1 && | ||
862 | !timing->pixel_clock) { | ||
863 | /* Other timing or info */ | ||
864 | switch (data->type) { | ||
865 | case EDID_DETAIL_MONITOR_SERIAL: | ||
866 | break; | ||
867 | case EDID_DETAIL_MONITOR_STRING: | ||
868 | break; | ||
869 | case EDID_DETAIL_MONITOR_RANGE: | ||
870 | /* Get monitor range data */ | ||
871 | break; | ||
872 | case EDID_DETAIL_MONITOR_NAME: | ||
873 | break; | ||
874 | case EDID_DETAIL_MONITOR_CPDATA: | ||
875 | break; | ||
876 | case EDID_DETAIL_STD_MODES: | ||
877 | for (j = 0; j < 6; i++) { | ||
878 | struct std_timing *std; | ||
879 | struct drm_display_mode *newmode; | ||
880 | |||
881 | std = &data->data.timings[j]; | ||
882 | newmode = drm_mode_std(dev, std, | ||
883 | edid->revision, | ||
884 | timing_level); | ||
885 | if (newmode) { | ||
886 | drm_mode_probed_add(connector, newmode); | ||
887 | modes++; | ||
888 | } | ||
889 | } | ||
890 | break; | ||
891 | default: | ||
892 | break; | ||
893 | } | ||
894 | } else { | ||
895 | newmode = drm_mode_detailed(dev, edid, timing, quirks); | ||
896 | if (!newmode) | ||
897 | continue; | ||
898 | 1022 | ||
899 | /* First detailed mode is preferred */ | 1023 | /* In 1.0, only timings are allowed */ |
900 | if (i == 0 && (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING)) | 1024 | if (!timing->pixel_clock && edid->version == 1 && |
901 | newmode->type |= DRM_MODE_TYPE_PREFERRED; | 1025 | edid->revision == 0) |
902 | drm_mode_probed_add(connector, newmode); | 1026 | continue; |
903 | 1027 | ||
904 | modes++; | 1028 | modes += add_detailed_modes(connector, timing, edid, quirks, |
905 | } | 1029 | preferred); |
906 | } | 1030 | } |
907 | 1031 | ||
908 | return modes; | 1032 | return modes; |
909 | } | 1033 | } |
1034 | |||
910 | /** | 1035 | /** |
911 | * add_detailed_mode_eedid - get detailed mode info from addtional timing | 1036 | * add_detailed_mode_eedid - get detailed mode info from addtional timing |
912 | * EDID block | 1037 | * EDID block |
@@ -920,12 +1045,9 @@ static int add_detailed_info(struct drm_connector *connector, | |||
920 | static int add_detailed_info_eedid(struct drm_connector *connector, | 1045 | static int add_detailed_info_eedid(struct drm_connector *connector, |
921 | struct edid *edid, u32 quirks) | 1046 | struct edid *edid, u32 quirks) |
922 | { | 1047 | { |
923 | struct drm_device *dev = connector->dev; | 1048 | int i, modes = 0; |
924 | int i, j, modes = 0; | ||
925 | char *edid_ext = NULL; | 1049 | char *edid_ext = NULL; |
926 | struct detailed_timing *timing; | 1050 | struct detailed_timing *timing; |
927 | struct detailed_non_pixel *data; | ||
928 | struct drm_display_mode *newmode; | ||
929 | int edid_ext_num; | 1051 | int edid_ext_num; |
930 | int start_offset, end_offset; | 1052 | int start_offset, end_offset; |
931 | int timing_level; | 1053 | int timing_level; |
@@ -976,51 +1098,7 @@ static int add_detailed_info_eedid(struct drm_connector *connector, | |||
976 | for (i = start_offset; i < end_offset; | 1098 | for (i = start_offset; i < end_offset; |
977 | i += sizeof(struct detailed_timing)) { | 1099 | i += sizeof(struct detailed_timing)) { |
978 | timing = (struct detailed_timing *)(edid_ext + i); | 1100 | timing = (struct detailed_timing *)(edid_ext + i); |
979 | data = &timing->data.other_data; | 1101 | modes += add_detailed_modes(connector, timing, edid, quirks, 0); |
980 | /* Detailed mode timing */ | ||
981 | if (timing->pixel_clock) { | ||
982 | newmode = drm_mode_detailed(dev, edid, timing, quirks); | ||
983 | if (!newmode) | ||
984 | continue; | ||
985 | |||
986 | drm_mode_probed_add(connector, newmode); | ||
987 | |||
988 | modes++; | ||
989 | continue; | ||
990 | } | ||
991 | |||
992 | /* Other timing or info */ | ||
993 | switch (data->type) { | ||
994 | case EDID_DETAIL_MONITOR_SERIAL: | ||
995 | break; | ||
996 | case EDID_DETAIL_MONITOR_STRING: | ||
997 | break; | ||
998 | case EDID_DETAIL_MONITOR_RANGE: | ||
999 | /* Get monitor range data */ | ||
1000 | break; | ||
1001 | case EDID_DETAIL_MONITOR_NAME: | ||
1002 | break; | ||
1003 | case EDID_DETAIL_MONITOR_CPDATA: | ||
1004 | break; | ||
1005 | case EDID_DETAIL_STD_MODES: | ||
1006 | /* Five modes per detailed section */ | ||
1007 | for (j = 0; j < 5; i++) { | ||
1008 | struct std_timing *std; | ||
1009 | struct drm_display_mode *newmode; | ||
1010 | |||
1011 | std = &data->data.timings[j]; | ||
1012 | newmode = drm_mode_std(dev, std, | ||
1013 | edid->revision, | ||
1014 | timing_level); | ||
1015 | if (newmode) { | ||
1016 | drm_mode_probed_add(connector, newmode); | ||
1017 | modes++; | ||
1018 | } | ||
1019 | } | ||
1020 | break; | ||
1021 | default: | ||
1022 | break; | ||
1023 | } | ||
1024 | } | 1102 | } |
1025 | 1103 | ||
1026 | return modes; | 1104 | return modes; |
@@ -1066,19 +1144,19 @@ static int drm_ddc_read_edid(struct drm_connector *connector, | |||
1066 | struct i2c_adapter *adapter, | 1144 | struct i2c_adapter *adapter, |
1067 | char *buf, int len) | 1145 | char *buf, int len) |
1068 | { | 1146 | { |
1069 | int ret; | 1147 | int i; |
1070 | 1148 | ||
1071 | ret = drm_do_probe_ddc_edid(adapter, buf, len); | 1149 | for (i = 0; i < 4; i++) { |
1072 | if (ret != 0) { | 1150 | if (drm_do_probe_ddc_edid(adapter, buf, len)) |
1073 | goto end; | 1151 | return -1; |
1074 | } | 1152 | if (edid_is_valid((struct edid *)buf)) |
1075 | if (!edid_is_valid((struct edid *)buf)) { | 1153 | return 0; |
1076 | dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n", | ||
1077 | drm_get_connector_name(connector)); | ||
1078 | ret = -1; | ||
1079 | } | 1154 | } |
1080 | end: | 1155 | |
1081 | return ret; | 1156 | /* repeated checksum failures; warn, but carry on */ |
1157 | dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n", | ||
1158 | drm_get_connector_name(connector)); | ||
1159 | return -1; | ||
1082 | } | 1160 | } |
1083 | 1161 | ||
1084 | /** | 1162 | /** |
@@ -1296,6 +1374,8 @@ int drm_add_modes_noedid(struct drm_connector *connector, | |||
1296 | ptr->vdisplay > vdisplay) | 1374 | ptr->vdisplay > vdisplay) |
1297 | continue; | 1375 | continue; |
1298 | } | 1376 | } |
1377 | if (drm_mode_vrefresh(ptr) > 61) | ||
1378 | continue; | ||
1299 | mode = drm_mode_duplicate(dev, ptr); | 1379 | mode = drm_mode_duplicate(dev, ptr); |
1300 | if (mode) { | 1380 | if (mode) { |
1301 | drm_mode_probed_add(connector, mode); | 1381 | drm_mode_probed_add(connector, mode); |
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 65ef011fa8ba..34afe15ebc0c 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c | |||
@@ -373,11 +373,9 @@ static void drm_fb_helper_off(struct fb_info *info, int dpms_mode) | |||
373 | mutex_unlock(&dev->mode_config.mutex); | 373 | mutex_unlock(&dev->mode_config.mutex); |
374 | } | 374 | } |
375 | } | 375 | } |
376 | if (dpms_mode == DRM_MODE_DPMS_OFF) { | 376 | mutex_lock(&dev->mode_config.mutex); |
377 | mutex_lock(&dev->mode_config.mutex); | 377 | crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); |
378 | crtc_funcs->dpms(crtc, dpms_mode); | 378 | mutex_unlock(&dev->mode_config.mutex); |
379 | mutex_unlock(&dev->mode_config.mutex); | ||
380 | } | ||
381 | } | 379 | } |
382 | } | 380 | } |
383 | } | 381 | } |
@@ -385,18 +383,23 @@ static void drm_fb_helper_off(struct fb_info *info, int dpms_mode) | |||
385 | int drm_fb_helper_blank(int blank, struct fb_info *info) | 383 | int drm_fb_helper_blank(int blank, struct fb_info *info) |
386 | { | 384 | { |
387 | switch (blank) { | 385 | switch (blank) { |
386 | /* Display: On; HSync: On, VSync: On */ | ||
388 | case FB_BLANK_UNBLANK: | 387 | case FB_BLANK_UNBLANK: |
389 | drm_fb_helper_on(info); | 388 | drm_fb_helper_on(info); |
390 | break; | 389 | break; |
390 | /* Display: Off; HSync: On, VSync: On */ | ||
391 | case FB_BLANK_NORMAL: | 391 | case FB_BLANK_NORMAL: |
392 | drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY); | 392 | drm_fb_helper_off(info, DRM_MODE_DPMS_ON); |
393 | break; | 393 | break; |
394 | /* Display: Off; HSync: Off, VSync: On */ | ||
394 | case FB_BLANK_HSYNC_SUSPEND: | 395 | case FB_BLANK_HSYNC_SUSPEND: |
395 | drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY); | 396 | drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY); |
396 | break; | 397 | break; |
398 | /* Display: Off; HSync: On, VSync: Off */ | ||
397 | case FB_BLANK_VSYNC_SUSPEND: | 399 | case FB_BLANK_VSYNC_SUSPEND: |
398 | drm_fb_helper_off(info, DRM_MODE_DPMS_SUSPEND); | 400 | drm_fb_helper_off(info, DRM_MODE_DPMS_SUSPEND); |
399 | break; | 401 | break; |
402 | /* Display: Off; HSync: Off, VSync: Off */ | ||
400 | case FB_BLANK_POWERDOWN: | 403 | case FB_BLANK_POWERDOWN: |
401 | drm_fb_helper_off(info, DRM_MODE_DPMS_OFF); | 404 | drm_fb_helper_off(info, DRM_MODE_DPMS_OFF); |
402 | break; | 405 | break; |
@@ -905,8 +908,13 @@ int drm_fb_helper_single_fb_probe(struct drm_device *dev, | |||
905 | 908 | ||
906 | if (new_fb) { | 909 | if (new_fb) { |
907 | info->var.pixclock = 0; | 910 | info->var.pixclock = 0; |
908 | if (register_framebuffer(info) < 0) | 911 | ret = fb_alloc_cmap(&info->cmap, crtc->gamma_size, 0); |
912 | if (ret) | ||
913 | return ret; | ||
914 | if (register_framebuffer(info) < 0) { | ||
915 | fb_dealloc_cmap(&info->cmap); | ||
909 | return -EINVAL; | 916 | return -EINVAL; |
917 | } | ||
910 | } else { | 918 | } else { |
911 | drm_fb_helper_set_par(info); | 919 | drm_fb_helper_set_par(info); |
912 | } | 920 | } |
@@ -936,6 +944,7 @@ void drm_fb_helper_free(struct drm_fb_helper *helper) | |||
936 | unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op); | 944 | unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op); |
937 | } | 945 | } |
938 | drm_fb_helper_crtc_free(helper); | 946 | drm_fb_helper_crtc_free(helper); |
947 | fb_dealloc_cmap(&helper->fb->fbdev->cmap); | ||
939 | } | 948 | } |
940 | EXPORT_SYMBOL(drm_fb_helper_free); | 949 | EXPORT_SYMBOL(drm_fb_helper_free); |
941 | 950 | ||
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index 251bc0e3b5ec..08d14df3bb42 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c | |||
@@ -257,6 +257,9 @@ static int drm_open_helper(struct inode *inode, struct file *filp, | |||
257 | 257 | ||
258 | INIT_LIST_HEAD(&priv->lhead); | 258 | INIT_LIST_HEAD(&priv->lhead); |
259 | INIT_LIST_HEAD(&priv->fbs); | 259 | INIT_LIST_HEAD(&priv->fbs); |
260 | INIT_LIST_HEAD(&priv->event_list); | ||
261 | init_waitqueue_head(&priv->event_wait); | ||
262 | priv->event_space = 4096; /* set aside 4k for event buffer */ | ||
260 | 263 | ||
261 | if (dev->driver->driver_features & DRIVER_GEM) | 264 | if (dev->driver->driver_features & DRIVER_GEM) |
262 | drm_gem_open(dev, priv); | 265 | drm_gem_open(dev, priv); |
@@ -297,6 +300,18 @@ static int drm_open_helper(struct inode *inode, struct file *filp, | |||
297 | goto out_free; | 300 | goto out_free; |
298 | } | 301 | } |
299 | } | 302 | } |
303 | mutex_lock(&dev->struct_mutex); | ||
304 | if (dev->driver->master_set) { | ||
305 | ret = dev->driver->master_set(dev, priv, true); | ||
306 | if (ret) { | ||
307 | /* drop both references if this fails */ | ||
308 | drm_master_put(&priv->minor->master); | ||
309 | drm_master_put(&priv->master); | ||
310 | mutex_unlock(&dev->struct_mutex); | ||
311 | goto out_free; | ||
312 | } | ||
313 | } | ||
314 | mutex_unlock(&dev->struct_mutex); | ||
300 | } else { | 315 | } else { |
301 | /* get a reference to the master */ | 316 | /* get a reference to the master */ |
302 | priv->master = drm_master_get(priv->minor->master); | 317 | priv->master = drm_master_get(priv->minor->master); |
@@ -413,6 +428,30 @@ static void drm_master_release(struct drm_device *dev, struct file *filp) | |||
413 | } | 428 | } |
414 | } | 429 | } |
415 | 430 | ||
431 | static void drm_events_release(struct drm_file *file_priv) | ||
432 | { | ||
433 | struct drm_device *dev = file_priv->minor->dev; | ||
434 | struct drm_pending_event *e, *et; | ||
435 | struct drm_pending_vblank_event *v, *vt; | ||
436 | unsigned long flags; | ||
437 | |||
438 | spin_lock_irqsave(&dev->event_lock, flags); | ||
439 | |||
440 | /* Remove pending flips */ | ||
441 | list_for_each_entry_safe(v, vt, &dev->vblank_event_list, base.link) | ||
442 | if (v->base.file_priv == file_priv) { | ||
443 | list_del(&v->base.link); | ||
444 | drm_vblank_put(dev, v->pipe); | ||
445 | v->base.destroy(&v->base); | ||
446 | } | ||
447 | |||
448 | /* Remove unconsumed events */ | ||
449 | list_for_each_entry_safe(e, et, &file_priv->event_list, link) | ||
450 | e->destroy(e); | ||
451 | |||
452 | spin_unlock_irqrestore(&dev->event_lock, flags); | ||
453 | } | ||
454 | |||
416 | /** | 455 | /** |
417 | * Release file. | 456 | * Release file. |
418 | * | 457 | * |
@@ -451,6 +490,8 @@ int drm_release(struct inode *inode, struct file *filp) | |||
451 | if (file_priv->minor->master) | 490 | if (file_priv->minor->master) |
452 | drm_master_release(dev, filp); | 491 | drm_master_release(dev, filp); |
453 | 492 | ||
493 | drm_events_release(file_priv); | ||
494 | |||
454 | if (dev->driver->driver_features & DRIVER_GEM) | 495 | if (dev->driver->driver_features & DRIVER_GEM) |
455 | drm_gem_release(dev, file_priv); | 496 | drm_gem_release(dev, file_priv); |
456 | 497 | ||
@@ -504,6 +545,8 @@ int drm_release(struct inode *inode, struct file *filp) | |||
504 | 545 | ||
505 | if (file_priv->minor->master == file_priv->master) { | 546 | if (file_priv->minor->master == file_priv->master) { |
506 | /* drop the reference held my the minor */ | 547 | /* drop the reference held my the minor */ |
548 | if (dev->driver->master_drop) | ||
549 | dev->driver->master_drop(dev, file_priv, true); | ||
507 | drm_master_put(&file_priv->minor->master); | 550 | drm_master_put(&file_priv->minor->master); |
508 | } | 551 | } |
509 | } | 552 | } |
@@ -544,9 +587,74 @@ int drm_release(struct inode *inode, struct file *filp) | |||
544 | } | 587 | } |
545 | EXPORT_SYMBOL(drm_release); | 588 | EXPORT_SYMBOL(drm_release); |
546 | 589 | ||
547 | /** No-op. */ | 590 | static bool |
591 | drm_dequeue_event(struct drm_file *file_priv, | ||
592 | size_t total, size_t max, struct drm_pending_event **out) | ||
593 | { | ||
594 | struct drm_device *dev = file_priv->minor->dev; | ||
595 | struct drm_pending_event *e; | ||
596 | unsigned long flags; | ||
597 | bool ret = false; | ||
598 | |||
599 | spin_lock_irqsave(&dev->event_lock, flags); | ||
600 | |||
601 | *out = NULL; | ||
602 | if (list_empty(&file_priv->event_list)) | ||
603 | goto out; | ||
604 | e = list_first_entry(&file_priv->event_list, | ||
605 | struct drm_pending_event, link); | ||
606 | if (e->event->length + total > max) | ||
607 | goto out; | ||
608 | |||
609 | file_priv->event_space += e->event->length; | ||
610 | list_del(&e->link); | ||
611 | *out = e; | ||
612 | ret = true; | ||
613 | |||
614 | out: | ||
615 | spin_unlock_irqrestore(&dev->event_lock, flags); | ||
616 | return ret; | ||
617 | } | ||
618 | |||
619 | ssize_t drm_read(struct file *filp, char __user *buffer, | ||
620 | size_t count, loff_t *offset) | ||
621 | { | ||
622 | struct drm_file *file_priv = filp->private_data; | ||
623 | struct drm_pending_event *e; | ||
624 | size_t total; | ||
625 | ssize_t ret; | ||
626 | |||
627 | ret = wait_event_interruptible(file_priv->event_wait, | ||
628 | !list_empty(&file_priv->event_list)); | ||
629 | if (ret < 0) | ||
630 | return ret; | ||
631 | |||
632 | total = 0; | ||
633 | while (drm_dequeue_event(file_priv, total, count, &e)) { | ||
634 | if (copy_to_user(buffer + total, | ||
635 | e->event, e->event->length)) { | ||
636 | total = -EFAULT; | ||
637 | break; | ||
638 | } | ||
639 | |||
640 | total += e->event->length; | ||
641 | e->destroy(e); | ||
642 | } | ||
643 | |||
644 | return total; | ||
645 | } | ||
646 | EXPORT_SYMBOL(drm_read); | ||
647 | |||
548 | unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait) | 648 | unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait) |
549 | { | 649 | { |
550 | return 0; | 650 | struct drm_file *file_priv = filp->private_data; |
651 | unsigned int mask = 0; | ||
652 | |||
653 | poll_wait(filp, &file_priv->event_wait, wait); | ||
654 | |||
655 | if (!list_empty(&file_priv->event_list)) | ||
656 | mask |= POLLIN | POLLRDNORM; | ||
657 | |||
658 | return mask; | ||
551 | } | 659 | } |
552 | EXPORT_SYMBOL(drm_poll); | 660 | EXPORT_SYMBOL(drm_poll); |
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index 0a6f0b3bdc78..6b3ce6d38848 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c | |||
@@ -550,6 +550,63 @@ out: | |||
550 | return ret; | 550 | return ret; |
551 | } | 551 | } |
552 | 552 | ||
553 | static int drm_queue_vblank_event(struct drm_device *dev, int pipe, | ||
554 | union drm_wait_vblank *vblwait, | ||
555 | struct drm_file *file_priv) | ||
556 | { | ||
557 | struct drm_pending_vblank_event *e; | ||
558 | struct timeval now; | ||
559 | unsigned long flags; | ||
560 | unsigned int seq; | ||
561 | |||
562 | e = kzalloc(sizeof *e, GFP_KERNEL); | ||
563 | if (e == NULL) | ||
564 | return -ENOMEM; | ||
565 | |||
566 | e->pipe = pipe; | ||
567 | e->event.base.type = DRM_EVENT_VBLANK; | ||
568 | e->event.base.length = sizeof e->event; | ||
569 | e->event.user_data = vblwait->request.signal; | ||
570 | e->base.event = &e->event.base; | ||
571 | e->base.file_priv = file_priv; | ||
572 | e->base.destroy = (void (*) (struct drm_pending_event *)) kfree; | ||
573 | |||
574 | do_gettimeofday(&now); | ||
575 | spin_lock_irqsave(&dev->event_lock, flags); | ||
576 | |||
577 | if (file_priv->event_space < sizeof e->event) { | ||
578 | spin_unlock_irqrestore(&dev->event_lock, flags); | ||
579 | kfree(e); | ||
580 | return -ENOMEM; | ||
581 | } | ||
582 | |||
583 | file_priv->event_space -= sizeof e->event; | ||
584 | seq = drm_vblank_count(dev, pipe); | ||
585 | if ((vblwait->request.type & _DRM_VBLANK_NEXTONMISS) && | ||
586 | (seq - vblwait->request.sequence) <= (1 << 23)) { | ||
587 | vblwait->request.sequence = seq + 1; | ||
588 | vblwait->reply.sequence = vblwait->request.sequence; | ||
589 | } | ||
590 | |||
591 | DRM_DEBUG("event on vblank count %d, current %d, crtc %d\n", | ||
592 | vblwait->request.sequence, seq, pipe); | ||
593 | |||
594 | e->event.sequence = vblwait->request.sequence; | ||
595 | if ((seq - vblwait->request.sequence) <= (1 << 23)) { | ||
596 | e->event.tv_sec = now.tv_sec; | ||
597 | e->event.tv_usec = now.tv_usec; | ||
598 | drm_vblank_put(dev, e->pipe); | ||
599 | list_add_tail(&e->base.link, &e->base.file_priv->event_list); | ||
600 | wake_up_interruptible(&e->base.file_priv->event_wait); | ||
601 | } else { | ||
602 | list_add_tail(&e->base.link, &dev->vblank_event_list); | ||
603 | } | ||
604 | |||
605 | spin_unlock_irqrestore(&dev->event_lock, flags); | ||
606 | |||
607 | return 0; | ||
608 | } | ||
609 | |||
553 | /** | 610 | /** |
554 | * Wait for VBLANK. | 611 | * Wait for VBLANK. |
555 | * | 612 | * |
@@ -609,6 +666,9 @@ int drm_wait_vblank(struct drm_device *dev, void *data, | |||
609 | goto done; | 666 | goto done; |
610 | } | 667 | } |
611 | 668 | ||
669 | if (flags & _DRM_VBLANK_EVENT) | ||
670 | return drm_queue_vblank_event(dev, crtc, vblwait, file_priv); | ||
671 | |||
612 | if ((flags & _DRM_VBLANK_NEXTONMISS) && | 672 | if ((flags & _DRM_VBLANK_NEXTONMISS) && |
613 | (seq - vblwait->request.sequence) <= (1<<23)) { | 673 | (seq - vblwait->request.sequence) <= (1<<23)) { |
614 | vblwait->request.sequence = seq + 1; | 674 | vblwait->request.sequence = seq + 1; |
@@ -641,6 +701,38 @@ done: | |||
641 | return ret; | 701 | return ret; |
642 | } | 702 | } |
643 | 703 | ||
704 | void drm_handle_vblank_events(struct drm_device *dev, int crtc) | ||
705 | { | ||
706 | struct drm_pending_vblank_event *e, *t; | ||
707 | struct timeval now; | ||
708 | unsigned long flags; | ||
709 | unsigned int seq; | ||
710 | |||
711 | do_gettimeofday(&now); | ||
712 | seq = drm_vblank_count(dev, crtc); | ||
713 | |||
714 | spin_lock_irqsave(&dev->event_lock, flags); | ||
715 | |||
716 | list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) { | ||
717 | if (e->pipe != crtc) | ||
718 | continue; | ||
719 | if ((seq - e->event.sequence) > (1<<23)) | ||
720 | continue; | ||
721 | |||
722 | DRM_DEBUG("vblank event on %d, current %d\n", | ||
723 | e->event.sequence, seq); | ||
724 | |||
725 | e->event.sequence = seq; | ||
726 | e->event.tv_sec = now.tv_sec; | ||
727 | e->event.tv_usec = now.tv_usec; | ||
728 | drm_vblank_put(dev, e->pipe); | ||
729 | list_move_tail(&e->base.link, &e->base.file_priv->event_list); | ||
730 | wake_up_interruptible(&e->base.file_priv->event_wait); | ||
731 | } | ||
732 | |||
733 | spin_unlock_irqrestore(&dev->event_lock, flags); | ||
734 | } | ||
735 | |||
644 | /** | 736 | /** |
645 | * drm_handle_vblank - handle a vblank event | 737 | * drm_handle_vblank - handle a vblank event |
646 | * @dev: DRM device | 738 | * @dev: DRM device |
@@ -651,7 +743,11 @@ done: | |||
651 | */ | 743 | */ |
652 | void drm_handle_vblank(struct drm_device *dev, int crtc) | 744 | void drm_handle_vblank(struct drm_device *dev, int crtc) |
653 | { | 745 | { |
746 | if (!dev->num_crtcs) | ||
747 | return; | ||
748 | |||
654 | atomic_inc(&dev->_vblank_count[crtc]); | 749 | atomic_inc(&dev->_vblank_count[crtc]); |
655 | DRM_WAKEUP(&dev->vbl_queue[crtc]); | 750 | DRM_WAKEUP(&dev->vbl_queue[crtc]); |
751 | drm_handle_vblank_events(dev, crtc); | ||
656 | } | 752 | } |
657 | EXPORT_SYMBOL(drm_handle_vblank); | 753 | EXPORT_SYMBOL(drm_handle_vblank); |
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index 97dc5a4f0de4..1f0d717dbad6 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c | |||
@@ -395,7 +395,7 @@ int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm) | |||
395 | else | 395 | else |
396 | total_used += entry->size; | 396 | total_used += entry->size; |
397 | } | 397 | } |
398 | seq_printf(m, "total: %d, used %d free %d\n", total, total_free, total_used); | 398 | seq_printf(m, "total: %d, used %d free %d\n", total, total_used, total_free); |
399 | return 0; | 399 | return 0; |
400 | } | 400 | } |
401 | EXPORT_SYMBOL(drm_mm_dump_table); | 401 | EXPORT_SYMBOL(drm_mm_dump_table); |
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index 51f677215f1d..6d81a02463a3 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c | |||
@@ -553,6 +553,32 @@ int drm_mode_height(struct drm_display_mode *mode) | |||
553 | } | 553 | } |
554 | EXPORT_SYMBOL(drm_mode_height); | 554 | EXPORT_SYMBOL(drm_mode_height); |
555 | 555 | ||
556 | /** drm_mode_hsync - get the hsync of a mode | ||
557 | * @mode: mode | ||
558 | * | ||
559 | * LOCKING: | ||
560 | * None. | ||
561 | * | ||
562 | * Return @modes's hsync rate in kHz, rounded to the nearest int. | ||
563 | */ | ||
564 | int drm_mode_hsync(struct drm_display_mode *mode) | ||
565 | { | ||
566 | unsigned int calc_val; | ||
567 | |||
568 | if (mode->hsync) | ||
569 | return mode->hsync; | ||
570 | |||
571 | if (mode->htotal < 0) | ||
572 | return 0; | ||
573 | |||
574 | calc_val = (mode->clock * 1000) / mode->htotal; /* hsync in Hz */ | ||
575 | calc_val += 500; /* round to 1000Hz */ | ||
576 | calc_val /= 1000; /* truncate to kHz */ | ||
577 | |||
578 | return calc_val; | ||
579 | } | ||
580 | EXPORT_SYMBOL(drm_mode_hsync); | ||
581 | |||
556 | /** | 582 | /** |
557 | * drm_mode_vrefresh - get the vrefresh of a mode | 583 | * drm_mode_vrefresh - get the vrefresh of a mode |
558 | * @mode: mode | 584 | * @mode: mode |
@@ -560,7 +586,7 @@ EXPORT_SYMBOL(drm_mode_height); | |||
560 | * LOCKING: | 586 | * LOCKING: |
561 | * None. | 587 | * None. |
562 | * | 588 | * |
563 | * Return @mode's vrefresh rate or calculate it if necessary. | 589 | * Return @mode's vrefresh rate in Hz or calculate it if necessary. |
564 | * | 590 | * |
565 | * FIXME: why is this needed? shouldn't vrefresh be set already? | 591 | * FIXME: why is this needed? shouldn't vrefresh be set already? |
566 | * | 592 | * |
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c index 55bb8a82d612..ad73e141afdb 100644 --- a/drivers/gpu/drm/drm_stub.c +++ b/drivers/gpu/drm/drm_stub.c | |||
@@ -128,6 +128,7 @@ struct drm_master *drm_master_get(struct drm_master *master) | |||
128 | kref_get(&master->refcount); | 128 | kref_get(&master->refcount); |
129 | return master; | 129 | return master; |
130 | } | 130 | } |
131 | EXPORT_SYMBOL(drm_master_get); | ||
131 | 132 | ||
132 | static void drm_master_destroy(struct kref *kref) | 133 | static void drm_master_destroy(struct kref *kref) |
133 | { | 134 | { |
@@ -170,10 +171,13 @@ void drm_master_put(struct drm_master **master) | |||
170 | kref_put(&(*master)->refcount, drm_master_destroy); | 171 | kref_put(&(*master)->refcount, drm_master_destroy); |
171 | *master = NULL; | 172 | *master = NULL; |
172 | } | 173 | } |
174 | EXPORT_SYMBOL(drm_master_put); | ||
173 | 175 | ||
174 | int drm_setmaster_ioctl(struct drm_device *dev, void *data, | 176 | int drm_setmaster_ioctl(struct drm_device *dev, void *data, |
175 | struct drm_file *file_priv) | 177 | struct drm_file *file_priv) |
176 | { | 178 | { |
179 | int ret = 0; | ||
180 | |||
177 | if (file_priv->is_master) | 181 | if (file_priv->is_master) |
178 | return 0; | 182 | return 0; |
179 | 183 | ||
@@ -188,6 +192,13 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data, | |||
188 | mutex_lock(&dev->struct_mutex); | 192 | mutex_lock(&dev->struct_mutex); |
189 | file_priv->minor->master = drm_master_get(file_priv->master); | 193 | file_priv->minor->master = drm_master_get(file_priv->master); |
190 | file_priv->is_master = 1; | 194 | file_priv->is_master = 1; |
195 | if (dev->driver->master_set) { | ||
196 | ret = dev->driver->master_set(dev, file_priv, false); | ||
197 | if (unlikely(ret != 0)) { | ||
198 | file_priv->is_master = 0; | ||
199 | drm_master_put(&file_priv->minor->master); | ||
200 | } | ||
201 | } | ||
191 | mutex_unlock(&dev->struct_mutex); | 202 | mutex_unlock(&dev->struct_mutex); |
192 | } | 203 | } |
193 | 204 | ||
@@ -204,6 +215,8 @@ int drm_dropmaster_ioctl(struct drm_device *dev, void *data, | |||
204 | return -EINVAL; | 215 | return -EINVAL; |
205 | 216 | ||
206 | mutex_lock(&dev->struct_mutex); | 217 | mutex_lock(&dev->struct_mutex); |
218 | if (dev->driver->master_drop) | ||
219 | dev->driver->master_drop(dev, file_priv, false); | ||
207 | drm_master_put(&file_priv->minor->master); | 220 | drm_master_put(&file_priv->minor->master); |
208 | file_priv->is_master = 0; | 221 | file_priv->is_master = 0; |
209 | mutex_unlock(&dev->struct_mutex); | 222 | mutex_unlock(&dev->struct_mutex); |
@@ -220,9 +233,11 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev, | |||
220 | INIT_LIST_HEAD(&dev->ctxlist); | 233 | INIT_LIST_HEAD(&dev->ctxlist); |
221 | INIT_LIST_HEAD(&dev->vmalist); | 234 | INIT_LIST_HEAD(&dev->vmalist); |
222 | INIT_LIST_HEAD(&dev->maplist); | 235 | INIT_LIST_HEAD(&dev->maplist); |
236 | INIT_LIST_HEAD(&dev->vblank_event_list); | ||
223 | 237 | ||
224 | spin_lock_init(&dev->count_lock); | 238 | spin_lock_init(&dev->count_lock); |
225 | spin_lock_init(&dev->drw_lock); | 239 | spin_lock_init(&dev->drw_lock); |
240 | spin_lock_init(&dev->event_lock); | ||
226 | init_timer(&dev->timer); | 241 | init_timer(&dev->timer); |
227 | mutex_init(&dev->struct_mutex); | 242 | mutex_init(&dev->struct_mutex); |
228 | mutex_init(&dev->ctxlist_mutex); | 243 | mutex_init(&dev->ctxlist_mutex); |
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index fa7b9be096bc..e3d049229cdd 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile | |||
@@ -15,7 +15,6 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \ | |||
15 | intel_lvds.o \ | 15 | intel_lvds.o \ |
16 | intel_bios.o \ | 16 | intel_bios.o \ |
17 | intel_dp.o \ | 17 | intel_dp.o \ |
18 | intel_dp_i2c.o \ | ||
19 | intel_hdmi.o \ | 18 | intel_hdmi.o \ |
20 | intel_sdvo.o \ | 19 | intel_sdvo.o \ |
21 | intel_modes.o \ | 20 | intel_modes.o \ |
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 7f436ec075f6..2fa217862058 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
@@ -333,6 +333,7 @@ static struct drm_driver driver = { | |||
333 | .mmap = drm_gem_mmap, | 333 | .mmap = drm_gem_mmap, |
334 | .poll = drm_poll, | 334 | .poll = drm_poll, |
335 | .fasync = drm_fasync, | 335 | .fasync = drm_fasync, |
336 | .read = drm_read, | ||
336 | #ifdef CONFIG_COMPAT | 337 | #ifdef CONFIG_COMPAT |
337 | .compat_ioctl = i915_compat_ioctl, | 338 | .compat_ioctl = i915_compat_ioctl, |
338 | #endif | 339 | #endif |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 099f420de57a..897230832c8c 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -32,7 +32,7 @@ | |||
32 | #include "intel_drv.h" | 32 | #include "intel_drv.h" |
33 | #include "i915_drm.h" | 33 | #include "i915_drm.h" |
34 | #include "i915_drv.h" | 34 | #include "i915_drv.h" |
35 | #include "intel_dp.h" | 35 | #include "drm_dp_helper.h" |
36 | 36 | ||
37 | #include "drm_crtc_helper.h" | 37 | #include "drm_crtc_helper.h" |
38 | 38 | ||
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index d83447557f9b..63424d5db9c6 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -33,7 +33,7 @@ | |||
33 | #include "intel_drv.h" | 33 | #include "intel_drv.h" |
34 | #include "i915_drm.h" | 34 | #include "i915_drm.h" |
35 | #include "i915_drv.h" | 35 | #include "i915_drv.h" |
36 | #include "intel_dp.h" | 36 | #include "drm_dp_helper.h" |
37 | 37 | ||
38 | #define DP_LINK_STATUS_SIZE 6 | 38 | #define DP_LINK_STATUS_SIZE 6 |
39 | #define DP_LINK_CHECK_TIMEOUT (10 * 1000) | 39 | #define DP_LINK_CHECK_TIMEOUT (10 * 1000) |
@@ -382,17 +382,77 @@ intel_dp_aux_native_read(struct intel_output *intel_output, | |||
382 | } | 382 | } |
383 | 383 | ||
384 | static int | 384 | static int |
385 | intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, | 385 | intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, |
386 | uint8_t *send, int send_bytes, | 386 | uint8_t write_byte, uint8_t *read_byte) |
387 | uint8_t *recv, int recv_bytes) | ||
388 | { | 387 | { |
388 | struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; | ||
389 | struct intel_dp_priv *dp_priv = container_of(adapter, | 389 | struct intel_dp_priv *dp_priv = container_of(adapter, |
390 | struct intel_dp_priv, | 390 | struct intel_dp_priv, |
391 | adapter); | 391 | adapter); |
392 | struct intel_output *intel_output = dp_priv->intel_output; | 392 | struct intel_output *intel_output = dp_priv->intel_output; |
393 | uint16_t address = algo_data->address; | ||
394 | uint8_t msg[5]; | ||
395 | uint8_t reply[2]; | ||
396 | int msg_bytes; | ||
397 | int reply_bytes; | ||
398 | int ret; | ||
399 | |||
400 | /* Set up the command byte */ | ||
401 | if (mode & MODE_I2C_READ) | ||
402 | msg[0] = AUX_I2C_READ << 4; | ||
403 | else | ||
404 | msg[0] = AUX_I2C_WRITE << 4; | ||
405 | |||
406 | if (!(mode & MODE_I2C_STOP)) | ||
407 | msg[0] |= AUX_I2C_MOT << 4; | ||
408 | |||
409 | msg[1] = address >> 8; | ||
410 | msg[2] = address; | ||
411 | |||
412 | switch (mode) { | ||
413 | case MODE_I2C_WRITE: | ||
414 | msg[3] = 0; | ||
415 | msg[4] = write_byte; | ||
416 | msg_bytes = 5; | ||
417 | reply_bytes = 1; | ||
418 | break; | ||
419 | case MODE_I2C_READ: | ||
420 | msg[3] = 0; | ||
421 | msg_bytes = 4; | ||
422 | reply_bytes = 2; | ||
423 | break; | ||
424 | default: | ||
425 | msg_bytes = 3; | ||
426 | reply_bytes = 1; | ||
427 | break; | ||
428 | } | ||
393 | 429 | ||
394 | return intel_dp_aux_ch(intel_output, | 430 | for (;;) { |
395 | send, send_bytes, recv, recv_bytes); | 431 | ret = intel_dp_aux_ch(intel_output, |
432 | msg, msg_bytes, | ||
433 | reply, reply_bytes); | ||
434 | if (ret < 0) { | ||
435 | DRM_DEBUG("aux_ch failed %d\n", ret); | ||
436 | return ret; | ||
437 | } | ||
438 | switch (reply[0] & AUX_I2C_REPLY_MASK) { | ||
439 | case AUX_I2C_REPLY_ACK: | ||
440 | if (mode == MODE_I2C_READ) { | ||
441 | *read_byte = reply[1]; | ||
442 | } | ||
443 | return reply_bytes - 1; | ||
444 | case AUX_I2C_REPLY_NACK: | ||
445 | DRM_DEBUG("aux_ch nack\n"); | ||
446 | return -EREMOTEIO; | ||
447 | case AUX_I2C_REPLY_DEFER: | ||
448 | DRM_DEBUG("aux_ch defer\n"); | ||
449 | udelay(100); | ||
450 | break; | ||
451 | default: | ||
452 | DRM_ERROR("aux_ch invalid reply 0x%02x\n", reply[0]); | ||
453 | return -EREMOTEIO; | ||
454 | } | ||
455 | } | ||
396 | } | 456 | } |
397 | 457 | ||
398 | static int | 458 | static int |
diff --git a/drivers/gpu/drm/i915/intel_dp.h b/drivers/gpu/drm/i915/intel_dp.h deleted file mode 100644 index 2b38054d3b6d..000000000000 --- a/drivers/gpu/drm/i915/intel_dp.h +++ /dev/null | |||
@@ -1,144 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright © 2008 Keith Packard | ||
3 | * | ||
4 | * Permission to use, copy, modify, distribute, and sell this software and its | ||
5 | * documentation for any purpose is hereby granted without fee, provided that | ||
6 | * the above copyright notice appear in all copies and that both that copyright | ||
7 | * notice and this permission notice appear in supporting documentation, and | ||
8 | * that the name of the copyright holders not be used in advertising or | ||
9 | * publicity pertaining to distribution of the software without specific, | ||
10 | * written prior permission. The copyright holders make no representations | ||
11 | * about the suitability of this software for any purpose. It is provided "as | ||
12 | * is" without express or implied warranty. | ||
13 | * | ||
14 | * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, | ||
15 | * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO | ||
16 | * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR | ||
17 | * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, | ||
18 | * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER | ||
19 | * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE | ||
20 | * OF THIS SOFTWARE. | ||
21 | */ | ||
22 | |||
23 | #ifndef _INTEL_DP_H_ | ||
24 | #define _INTEL_DP_H_ | ||
25 | |||
26 | /* From the VESA DisplayPort spec */ | ||
27 | |||
28 | #define AUX_NATIVE_WRITE 0x8 | ||
29 | #define AUX_NATIVE_READ 0x9 | ||
30 | #define AUX_I2C_WRITE 0x0 | ||
31 | #define AUX_I2C_READ 0x1 | ||
32 | #define AUX_I2C_STATUS 0x2 | ||
33 | #define AUX_I2C_MOT 0x4 | ||
34 | |||
35 | #define AUX_NATIVE_REPLY_ACK (0x0 << 4) | ||
36 | #define AUX_NATIVE_REPLY_NACK (0x1 << 4) | ||
37 | #define AUX_NATIVE_REPLY_DEFER (0x2 << 4) | ||
38 | #define AUX_NATIVE_REPLY_MASK (0x3 << 4) | ||
39 | |||
40 | #define AUX_I2C_REPLY_ACK (0x0 << 6) | ||
41 | #define AUX_I2C_REPLY_NACK (0x1 << 6) | ||
42 | #define AUX_I2C_REPLY_DEFER (0x2 << 6) | ||
43 | #define AUX_I2C_REPLY_MASK (0x3 << 6) | ||
44 | |||
45 | /* AUX CH addresses */ | ||
46 | #define DP_LINK_BW_SET 0x100 | ||
47 | # define DP_LINK_BW_1_62 0x06 | ||
48 | # define DP_LINK_BW_2_7 0x0a | ||
49 | |||
50 | #define DP_LANE_COUNT_SET 0x101 | ||
51 | # define DP_LANE_COUNT_MASK 0x0f | ||
52 | # define DP_LANE_COUNT_ENHANCED_FRAME_EN (1 << 7) | ||
53 | |||
54 | #define DP_TRAINING_PATTERN_SET 0x102 | ||
55 | |||
56 | # define DP_TRAINING_PATTERN_DISABLE 0 | ||
57 | # define DP_TRAINING_PATTERN_1 1 | ||
58 | # define DP_TRAINING_PATTERN_2 2 | ||
59 | # define DP_TRAINING_PATTERN_MASK 0x3 | ||
60 | |||
61 | # define DP_LINK_QUAL_PATTERN_DISABLE (0 << 2) | ||
62 | # define DP_LINK_QUAL_PATTERN_D10_2 (1 << 2) | ||
63 | # define DP_LINK_QUAL_PATTERN_ERROR_RATE (2 << 2) | ||
64 | # define DP_LINK_QUAL_PATTERN_PRBS7 (3 << 2) | ||
65 | # define DP_LINK_QUAL_PATTERN_MASK (3 << 2) | ||
66 | |||
67 | # define DP_RECOVERED_CLOCK_OUT_EN (1 << 4) | ||
68 | # define DP_LINK_SCRAMBLING_DISABLE (1 << 5) | ||
69 | |||
70 | # define DP_SYMBOL_ERROR_COUNT_BOTH (0 << 6) | ||
71 | # define DP_SYMBOL_ERROR_COUNT_DISPARITY (1 << 6) | ||
72 | # define DP_SYMBOL_ERROR_COUNT_SYMBOL (2 << 6) | ||
73 | # define DP_SYMBOL_ERROR_COUNT_MASK (3 << 6) | ||
74 | |||
75 | #define DP_TRAINING_LANE0_SET 0x103 | ||
76 | #define DP_TRAINING_LANE1_SET 0x104 | ||
77 | #define DP_TRAINING_LANE2_SET 0x105 | ||
78 | #define DP_TRAINING_LANE3_SET 0x106 | ||
79 | |||
80 | # define DP_TRAIN_VOLTAGE_SWING_MASK 0x3 | ||
81 | # define DP_TRAIN_VOLTAGE_SWING_SHIFT 0 | ||
82 | # define DP_TRAIN_MAX_SWING_REACHED (1 << 2) | ||
83 | # define DP_TRAIN_VOLTAGE_SWING_400 (0 << 0) | ||
84 | # define DP_TRAIN_VOLTAGE_SWING_600 (1 << 0) | ||
85 | # define DP_TRAIN_VOLTAGE_SWING_800 (2 << 0) | ||
86 | # define DP_TRAIN_VOLTAGE_SWING_1200 (3 << 0) | ||
87 | |||
88 | # define DP_TRAIN_PRE_EMPHASIS_MASK (3 << 3) | ||
89 | # define DP_TRAIN_PRE_EMPHASIS_0 (0 << 3) | ||
90 | # define DP_TRAIN_PRE_EMPHASIS_3_5 (1 << 3) | ||
91 | # define DP_TRAIN_PRE_EMPHASIS_6 (2 << 3) | ||
92 | # define DP_TRAIN_PRE_EMPHASIS_9_5 (3 << 3) | ||
93 | |||
94 | # define DP_TRAIN_PRE_EMPHASIS_SHIFT 3 | ||
95 | # define DP_TRAIN_MAX_PRE_EMPHASIS_REACHED (1 << 5) | ||
96 | |||
97 | #define DP_DOWNSPREAD_CTRL 0x107 | ||
98 | # define DP_SPREAD_AMP_0_5 (1 << 4) | ||
99 | |||
100 | #define DP_MAIN_LINK_CHANNEL_CODING_SET 0x108 | ||
101 | # define DP_SET_ANSI_8B10B (1 << 0) | ||
102 | |||
103 | #define DP_LANE0_1_STATUS 0x202 | ||
104 | #define DP_LANE2_3_STATUS 0x203 | ||
105 | |||
106 | # define DP_LANE_CR_DONE (1 << 0) | ||
107 | # define DP_LANE_CHANNEL_EQ_DONE (1 << 1) | ||
108 | # define DP_LANE_SYMBOL_LOCKED (1 << 2) | ||
109 | |||
110 | #define DP_LANE_ALIGN_STATUS_UPDATED 0x204 | ||
111 | |||
112 | #define DP_INTERLANE_ALIGN_DONE (1 << 0) | ||
113 | #define DP_DOWNSTREAM_PORT_STATUS_CHANGED (1 << 6) | ||
114 | #define DP_LINK_STATUS_UPDATED (1 << 7) | ||
115 | |||
116 | #define DP_SINK_STATUS 0x205 | ||
117 | |||
118 | #define DP_RECEIVE_PORT_0_STATUS (1 << 0) | ||
119 | #define DP_RECEIVE_PORT_1_STATUS (1 << 1) | ||
120 | |||
121 | #define DP_ADJUST_REQUEST_LANE0_1 0x206 | ||
122 | #define DP_ADJUST_REQUEST_LANE2_3 0x207 | ||
123 | |||
124 | #define DP_ADJUST_VOLTAGE_SWING_LANE0_MASK 0x03 | ||
125 | #define DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT 0 | ||
126 | #define DP_ADJUST_PRE_EMPHASIS_LANE0_MASK 0x0c | ||
127 | #define DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT 2 | ||
128 | #define DP_ADJUST_VOLTAGE_SWING_LANE1_MASK 0x30 | ||
129 | #define DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT 4 | ||
130 | #define DP_ADJUST_PRE_EMPHASIS_LANE1_MASK 0xc0 | ||
131 | #define DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT 6 | ||
132 | |||
133 | struct i2c_algo_dp_aux_data { | ||
134 | bool running; | ||
135 | u16 address; | ||
136 | int (*aux_ch) (struct i2c_adapter *adapter, | ||
137 | uint8_t *send, int send_bytes, | ||
138 | uint8_t *recv, int recv_bytes); | ||
139 | }; | ||
140 | |||
141 | int | ||
142 | i2c_dp_aux_add_bus(struct i2c_adapter *adapter); | ||
143 | |||
144 | #endif /* _INTEL_DP_H_ */ | ||
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile index b0a9de7a57c2..1e138f5bae09 100644 --- a/drivers/gpu/drm/ttm/Makefile +++ b/drivers/gpu/drm/ttm/Makefile | |||
@@ -3,6 +3,7 @@ | |||
3 | 3 | ||
4 | ccflags-y := -Iinclude/drm | 4 | ccflags-y := -Iinclude/drm |
5 | ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \ | 5 | ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \ |
6 | ttm_bo_util.o ttm_bo_vm.o ttm_module.o ttm_global.o | 6 | ttm_bo_util.o ttm_bo_vm.o ttm_module.o ttm_global.o \ |
7 | ttm_object.o ttm_lock.o ttm_execbuf_util.o | ||
7 | 8 | ||
8 | obj-$(CONFIG_DRM_TTM) += ttm.o | 9 | obj-$(CONFIG_DRM_TTM) += ttm.o |
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 87c06252d464..e13fd23f3334 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c | |||
@@ -275,9 +275,10 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc) | |||
275 | bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT, | 275 | bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT, |
276 | page_flags | TTM_PAGE_FLAG_USER, | 276 | page_flags | TTM_PAGE_FLAG_USER, |
277 | glob->dummy_read_page); | 277 | glob->dummy_read_page); |
278 | if (unlikely(bo->ttm == NULL)) | 278 | if (unlikely(bo->ttm == NULL)) { |
279 | ret = -ENOMEM; | 279 | ret = -ENOMEM; |
280 | break; | 280 | break; |
281 | } | ||
281 | 282 | ||
282 | ret = ttm_tt_set_user(bo->ttm, current, | 283 | ret = ttm_tt_set_user(bo->ttm, current, |
283 | bo->buffer_start, bo->num_pages); | 284 | bo->buffer_start, bo->num_pages); |
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index c70927ecda21..ceae52f45c39 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c | |||
@@ -369,6 +369,7 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp) | |||
369 | #endif | 369 | #endif |
370 | return tmp; | 370 | return tmp; |
371 | } | 371 | } |
372 | EXPORT_SYMBOL(ttm_io_prot); | ||
372 | 373 | ||
373 | static int ttm_bo_ioremap(struct ttm_buffer_object *bo, | 374 | static int ttm_bo_ioremap(struct ttm_buffer_object *bo, |
374 | unsigned long bus_base, | 375 | unsigned long bus_base, |
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c new file mode 100644 index 000000000000..c285c2902d15 --- /dev/null +++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c | |||
@@ -0,0 +1,117 @@ | |||
1 | /************************************************************************** | ||
2 | * | ||
3 | * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA | ||
4 | * All Rights Reserved. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the | ||
8 | * "Software"), to deal in the Software without restriction, including | ||
9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
10 | * distribute, sub license, and/or sell copies of the Software, and to | ||
11 | * permit persons to whom the Software is furnished to do so, subject to | ||
12 | * the following conditions: | ||
13 | * | ||
14 | * The above copyright notice and this permission notice (including the | ||
15 | * next paragraph) shall be included in all copies or substantial portions | ||
16 | * of the Software. | ||
17 | * | ||
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
25 | * | ||
26 | **************************************************************************/ | ||
27 | |||
28 | #include "ttm/ttm_execbuf_util.h" | ||
29 | #include "ttm/ttm_bo_driver.h" | ||
30 | #include "ttm/ttm_placement.h" | ||
31 | #include <linux/wait.h> | ||
32 | #include <linux/sched.h> | ||
33 | #include <linux/module.h> | ||
34 | |||
35 | void ttm_eu_backoff_reservation(struct list_head *list) | ||
36 | { | ||
37 | struct ttm_validate_buffer *entry; | ||
38 | |||
39 | list_for_each_entry(entry, list, head) { | ||
40 | struct ttm_buffer_object *bo = entry->bo; | ||
41 | if (!entry->reserved) | ||
42 | continue; | ||
43 | |||
44 | entry->reserved = false; | ||
45 | ttm_bo_unreserve(bo); | ||
46 | } | ||
47 | } | ||
48 | EXPORT_SYMBOL(ttm_eu_backoff_reservation); | ||
49 | |||
50 | /* | ||
51 | * Reserve buffers for validation. | ||
52 | * | ||
53 | * If a buffer in the list is marked for CPU access, we back off and | ||
54 | * wait for that buffer to become free for GPU access. | ||
55 | * | ||
56 | * If a buffer is reserved for another validation, the validator with | ||
57 | * the highest validation sequence backs off and waits for that buffer | ||
58 | * to become unreserved. This prevents deadlocks when validating multiple | ||
59 | * buffers in different orders. | ||
60 | */ | ||
61 | |||
62 | int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq) | ||
63 | { | ||
64 | struct ttm_validate_buffer *entry; | ||
65 | int ret; | ||
66 | |||
67 | retry: | ||
68 | list_for_each_entry(entry, list, head) { | ||
69 | struct ttm_buffer_object *bo = entry->bo; | ||
70 | |||
71 | entry->reserved = false; | ||
72 | ret = ttm_bo_reserve(bo, true, false, true, val_seq); | ||
73 | if (ret != 0) { | ||
74 | ttm_eu_backoff_reservation(list); | ||
75 | if (ret == -EAGAIN) { | ||
76 | ret = ttm_bo_wait_unreserved(bo, true); | ||
77 | if (unlikely(ret != 0)) | ||
78 | return ret; | ||
79 | goto retry; | ||
80 | } else | ||
81 | return ret; | ||
82 | } | ||
83 | |||
84 | entry->reserved = true; | ||
85 | if (unlikely(atomic_read(&bo->cpu_writers) > 0)) { | ||
86 | ttm_eu_backoff_reservation(list); | ||
87 | ret = ttm_bo_wait_cpu(bo, false); | ||
88 | if (ret) | ||
89 | return ret; | ||
90 | goto retry; | ||
91 | } | ||
92 | } | ||
93 | return 0; | ||
94 | } | ||
95 | EXPORT_SYMBOL(ttm_eu_reserve_buffers); | ||
96 | |||
97 | void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj) | ||
98 | { | ||
99 | struct ttm_validate_buffer *entry; | ||
100 | |||
101 | list_for_each_entry(entry, list, head) { | ||
102 | struct ttm_buffer_object *bo = entry->bo; | ||
103 | struct ttm_bo_driver *driver = bo->bdev->driver; | ||
104 | void *old_sync_obj; | ||
105 | |||
106 | spin_lock(&bo->lock); | ||
107 | old_sync_obj = bo->sync_obj; | ||
108 | bo->sync_obj = driver->sync_obj_ref(sync_obj); | ||
109 | bo->sync_obj_arg = entry->new_sync_obj_arg; | ||
110 | spin_unlock(&bo->lock); | ||
111 | ttm_bo_unreserve(bo); | ||
112 | entry->reserved = false; | ||
113 | if (old_sync_obj) | ||
114 | driver->sync_obj_unref(&old_sync_obj); | ||
115 | } | ||
116 | } | ||
117 | EXPORT_SYMBOL(ttm_eu_fence_buffer_objects); | ||
diff --git a/drivers/gpu/drm/ttm/ttm_lock.c b/drivers/gpu/drm/ttm/ttm_lock.c new file mode 100644 index 000000000000..f619ebcaa4ec --- /dev/null +++ b/drivers/gpu/drm/ttm/ttm_lock.c | |||
@@ -0,0 +1,311 @@ | |||
1 | /************************************************************************** | ||
2 | * | ||
3 | * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA | ||
4 | * All Rights Reserved. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the | ||
8 | * "Software"), to deal in the Software without restriction, including | ||
9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
10 | * distribute, sub license, and/or sell copies of the Software, and to | ||
11 | * permit persons to whom the Software is furnished to do so, subject to | ||
12 | * the following conditions: | ||
13 | * | ||
14 | * The above copyright notice and this permission notice (including the | ||
15 | * next paragraph) shall be included in all copies or substantial portions | ||
16 | * of the Software. | ||
17 | * | ||
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
25 | * | ||
26 | **************************************************************************/ | ||
27 | /* | ||
28 | * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> | ||
29 | */ | ||
30 | |||
31 | #include "ttm/ttm_lock.h" | ||
32 | #include "ttm/ttm_module.h" | ||
33 | #include <asm/atomic.h> | ||
34 | #include <linux/errno.h> | ||
35 | #include <linux/wait.h> | ||
36 | #include <linux/sched.h> | ||
37 | #include <linux/module.h> | ||
38 | |||
39 | #define TTM_WRITE_LOCK_PENDING (1 << 0) | ||
40 | #define TTM_VT_LOCK_PENDING (1 << 1) | ||
41 | #define TTM_SUSPEND_LOCK_PENDING (1 << 2) | ||
42 | #define TTM_VT_LOCK (1 << 3) | ||
43 | #define TTM_SUSPEND_LOCK (1 << 4) | ||
44 | |||
45 | void ttm_lock_init(struct ttm_lock *lock) | ||
46 | { | ||
47 | spin_lock_init(&lock->lock); | ||
48 | init_waitqueue_head(&lock->queue); | ||
49 | lock->rw = 0; | ||
50 | lock->flags = 0; | ||
51 | lock->kill_takers = false; | ||
52 | lock->signal = SIGKILL; | ||
53 | } | ||
54 | EXPORT_SYMBOL(ttm_lock_init); | ||
55 | |||
56 | void ttm_read_unlock(struct ttm_lock *lock) | ||
57 | { | ||
58 | spin_lock(&lock->lock); | ||
59 | if (--lock->rw == 0) | ||
60 | wake_up_all(&lock->queue); | ||
61 | spin_unlock(&lock->lock); | ||
62 | } | ||
63 | EXPORT_SYMBOL(ttm_read_unlock); | ||
64 | |||
65 | static bool __ttm_read_lock(struct ttm_lock *lock) | ||
66 | { | ||
67 | bool locked = false; | ||
68 | |||
69 | spin_lock(&lock->lock); | ||
70 | if (unlikely(lock->kill_takers)) { | ||
71 | send_sig(lock->signal, current, 0); | ||
72 | spin_unlock(&lock->lock); | ||
73 | return false; | ||
74 | } | ||
75 | if (lock->rw >= 0 && lock->flags == 0) { | ||
76 | ++lock->rw; | ||
77 | locked = true; | ||
78 | } | ||
79 | spin_unlock(&lock->lock); | ||
80 | return locked; | ||
81 | } | ||
82 | |||
83 | int ttm_read_lock(struct ttm_lock *lock, bool interruptible) | ||
84 | { | ||
85 | int ret = 0; | ||
86 | |||
87 | if (interruptible) | ||
88 | ret = wait_event_interruptible(lock->queue, | ||
89 | __ttm_read_lock(lock)); | ||
90 | else | ||
91 | wait_event(lock->queue, __ttm_read_lock(lock)); | ||
92 | return ret; | ||
93 | } | ||
94 | EXPORT_SYMBOL(ttm_read_lock); | ||
95 | |||
96 | static bool __ttm_read_trylock(struct ttm_lock *lock, bool *locked) | ||
97 | { | ||
98 | bool block = true; | ||
99 | |||
100 | *locked = false; | ||
101 | |||
102 | spin_lock(&lock->lock); | ||
103 | if (unlikely(lock->kill_takers)) { | ||
104 | send_sig(lock->signal, current, 0); | ||
105 | spin_unlock(&lock->lock); | ||
106 | return false; | ||
107 | } | ||
108 | if (lock->rw >= 0 && lock->flags == 0) { | ||
109 | ++lock->rw; | ||
110 | block = false; | ||
111 | *locked = true; | ||
112 | } else if (lock->flags == 0) { | ||
113 | block = false; | ||
114 | } | ||
115 | spin_unlock(&lock->lock); | ||
116 | |||
117 | return !block; | ||
118 | } | ||
119 | |||
120 | int ttm_read_trylock(struct ttm_lock *lock, bool interruptible) | ||
121 | { | ||
122 | int ret = 0; | ||
123 | bool locked; | ||
124 | |||
125 | if (interruptible) | ||
126 | ret = wait_event_interruptible | ||
127 | (lock->queue, __ttm_read_trylock(lock, &locked)); | ||
128 | else | ||
129 | wait_event(lock->queue, __ttm_read_trylock(lock, &locked)); | ||
130 | |||
131 | if (unlikely(ret != 0)) { | ||
132 | BUG_ON(locked); | ||
133 | return ret; | ||
134 | } | ||
135 | |||
136 | return (locked) ? 0 : -EBUSY; | ||
137 | } | ||
138 | |||
139 | void ttm_write_unlock(struct ttm_lock *lock) | ||
140 | { | ||
141 | spin_lock(&lock->lock); | ||
142 | lock->rw = 0; | ||
143 | wake_up_all(&lock->queue); | ||
144 | spin_unlock(&lock->lock); | ||
145 | } | ||
146 | EXPORT_SYMBOL(ttm_write_unlock); | ||
147 | |||
148 | static bool __ttm_write_lock(struct ttm_lock *lock) | ||
149 | { | ||
150 | bool locked = false; | ||
151 | |||
152 | spin_lock(&lock->lock); | ||
153 | if (unlikely(lock->kill_takers)) { | ||
154 | send_sig(lock->signal, current, 0); | ||
155 | spin_unlock(&lock->lock); | ||
156 | return false; | ||
157 | } | ||
158 | if (lock->rw == 0 && ((lock->flags & ~TTM_WRITE_LOCK_PENDING) == 0)) { | ||
159 | lock->rw = -1; | ||
160 | lock->flags &= ~TTM_WRITE_LOCK_PENDING; | ||
161 | locked = true; | ||
162 | } else { | ||
163 | lock->flags |= TTM_WRITE_LOCK_PENDING; | ||
164 | } | ||
165 | spin_unlock(&lock->lock); | ||
166 | return locked; | ||
167 | } | ||
168 | |||
169 | int ttm_write_lock(struct ttm_lock *lock, bool interruptible) | ||
170 | { | ||
171 | int ret = 0; | ||
172 | |||
173 | if (interruptible) { | ||
174 | ret = wait_event_interruptible(lock->queue, | ||
175 | __ttm_write_lock(lock)); | ||
176 | if (unlikely(ret != 0)) { | ||
177 | spin_lock(&lock->lock); | ||
178 | lock->flags &= ~TTM_WRITE_LOCK_PENDING; | ||
179 | wake_up_all(&lock->queue); | ||
180 | spin_unlock(&lock->lock); | ||
181 | } | ||
182 | } else | ||
183 | wait_event(lock->queue, __ttm_read_lock(lock)); | ||
184 | |||
185 | return ret; | ||
186 | } | ||
187 | EXPORT_SYMBOL(ttm_write_lock); | ||
188 | |||
189 | void ttm_write_lock_downgrade(struct ttm_lock *lock) | ||
190 | { | ||
191 | spin_lock(&lock->lock); | ||
192 | lock->rw = 1; | ||
193 | wake_up_all(&lock->queue); | ||
194 | spin_unlock(&lock->lock); | ||
195 | } | ||
196 | |||
197 | static int __ttm_vt_unlock(struct ttm_lock *lock) | ||
198 | { | ||
199 | int ret = 0; | ||
200 | |||
201 | spin_lock(&lock->lock); | ||
202 | if (unlikely(!(lock->flags & TTM_VT_LOCK))) | ||
203 | ret = -EINVAL; | ||
204 | lock->flags &= ~TTM_VT_LOCK; | ||
205 | wake_up_all(&lock->queue); | ||
206 | spin_unlock(&lock->lock); | ||
207 | printk(KERN_INFO TTM_PFX "vt unlock.\n"); | ||
208 | |||
209 | return ret; | ||
210 | } | ||
211 | |||
212 | static void ttm_vt_lock_remove(struct ttm_base_object **p_base) | ||
213 | { | ||
214 | struct ttm_base_object *base = *p_base; | ||
215 | struct ttm_lock *lock = container_of(base, struct ttm_lock, base); | ||
216 | int ret; | ||
217 | |||
218 | *p_base = NULL; | ||
219 | ret = __ttm_vt_unlock(lock); | ||
220 | BUG_ON(ret != 0); | ||
221 | } | ||
222 | |||
223 | static bool __ttm_vt_lock(struct ttm_lock *lock) | ||
224 | { | ||
225 | bool locked = false; | ||
226 | |||
227 | spin_lock(&lock->lock); | ||
228 | if (lock->rw == 0) { | ||
229 | lock->flags &= ~TTM_VT_LOCK_PENDING; | ||
230 | lock->flags |= TTM_VT_LOCK; | ||
231 | locked = true; | ||
232 | } else { | ||
233 | lock->flags |= TTM_VT_LOCK_PENDING; | ||
234 | } | ||
235 | spin_unlock(&lock->lock); | ||
236 | return locked; | ||
237 | } | ||
238 | |||
239 | int ttm_vt_lock(struct ttm_lock *lock, | ||
240 | bool interruptible, | ||
241 | struct ttm_object_file *tfile) | ||
242 | { | ||
243 | int ret = 0; | ||
244 | |||
245 | if (interruptible) { | ||
246 | ret = wait_event_interruptible(lock->queue, | ||
247 | __ttm_vt_lock(lock)); | ||
248 | if (unlikely(ret != 0)) { | ||
249 | spin_lock(&lock->lock); | ||
250 | lock->flags &= ~TTM_VT_LOCK_PENDING; | ||
251 | wake_up_all(&lock->queue); | ||
252 | spin_unlock(&lock->lock); | ||
253 | return ret; | ||
254 | } | ||
255 | } else | ||
256 | wait_event(lock->queue, __ttm_vt_lock(lock)); | ||
257 | |||
258 | /* | ||
259 | * Add a base-object, the destructor of which will | ||
260 | * make sure the lock is released if the client dies | ||
261 | * while holding it. | ||
262 | */ | ||
263 | |||
264 | ret = ttm_base_object_init(tfile, &lock->base, false, | ||
265 | ttm_lock_type, &ttm_vt_lock_remove, NULL); | ||
266 | if (ret) | ||
267 | (void)__ttm_vt_unlock(lock); | ||
268 | else { | ||
269 | lock->vt_holder = tfile; | ||
270 | printk(KERN_INFO TTM_PFX "vt lock.\n"); | ||
271 | } | ||
272 | |||
273 | return ret; | ||
274 | } | ||
275 | EXPORT_SYMBOL(ttm_vt_lock); | ||
276 | |||
277 | int ttm_vt_unlock(struct ttm_lock *lock) | ||
278 | { | ||
279 | return ttm_ref_object_base_unref(lock->vt_holder, | ||
280 | lock->base.hash.key, TTM_REF_USAGE); | ||
281 | } | ||
282 | EXPORT_SYMBOL(ttm_vt_unlock); | ||
283 | |||
284 | void ttm_suspend_unlock(struct ttm_lock *lock) | ||
285 | { | ||
286 | spin_lock(&lock->lock); | ||
287 | lock->flags &= ~TTM_SUSPEND_LOCK; | ||
288 | wake_up_all(&lock->queue); | ||
289 | spin_unlock(&lock->lock); | ||
290 | } | ||
291 | |||
292 | static bool __ttm_suspend_lock(struct ttm_lock *lock) | ||
293 | { | ||
294 | bool locked = false; | ||
295 | |||
296 | spin_lock(&lock->lock); | ||
297 | if (lock->rw == 0) { | ||
298 | lock->flags &= ~TTM_SUSPEND_LOCK_PENDING; | ||
299 | lock->flags |= TTM_SUSPEND_LOCK; | ||
300 | locked = true; | ||
301 | } else { | ||
302 | lock->flags |= TTM_SUSPEND_LOCK_PENDING; | ||
303 | } | ||
304 | spin_unlock(&lock->lock); | ||
305 | return locked; | ||
306 | } | ||
307 | |||
308 | void ttm_suspend_lock(struct ttm_lock *lock) | ||
309 | { | ||
310 | wait_event(lock->queue, __ttm_suspend_lock(lock)); | ||
311 | } | ||
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c index 072c281a6bb5..8bfde5f40841 100644 --- a/drivers/gpu/drm/ttm/ttm_memory.c +++ b/drivers/gpu/drm/ttm/ttm_memory.c | |||
@@ -274,16 +274,17 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob, | |||
274 | static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob, | 274 | static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob, |
275 | const struct sysinfo *si) | 275 | const struct sysinfo *si) |
276 | { | 276 | { |
277 | struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL); | 277 | struct ttm_mem_zone *zone; |
278 | uint64_t mem; | 278 | uint64_t mem; |
279 | int ret; | 279 | int ret; |
280 | 280 | ||
281 | if (unlikely(!zone)) | ||
282 | return -ENOMEM; | ||
283 | |||
284 | if (si->totalhigh == 0) | 281 | if (si->totalhigh == 0) |
285 | return 0; | 282 | return 0; |
286 | 283 | ||
284 | zone = kzalloc(sizeof(*zone), GFP_KERNEL); | ||
285 | if (unlikely(!zone)) | ||
286 | return -ENOMEM; | ||
287 | |||
287 | mem = si->totalram; | 288 | mem = si->totalram; |
288 | mem *= si->mem_unit; | 289 | mem *= si->mem_unit; |
289 | 290 | ||
@@ -460,6 +461,7 @@ void ttm_mem_global_free(struct ttm_mem_global *glob, | |||
460 | { | 461 | { |
461 | return ttm_mem_global_free_zone(glob, NULL, amount); | 462 | return ttm_mem_global_free_zone(glob, NULL, amount); |
462 | } | 463 | } |
464 | EXPORT_SYMBOL(ttm_mem_global_free); | ||
463 | 465 | ||
464 | static int ttm_mem_global_reserve(struct ttm_mem_global *glob, | 466 | static int ttm_mem_global_reserve(struct ttm_mem_global *glob, |
465 | struct ttm_mem_zone *single_zone, | 467 | struct ttm_mem_zone *single_zone, |
@@ -533,6 +535,7 @@ int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory, | |||
533 | return ttm_mem_global_alloc_zone(glob, NULL, memory, no_wait, | 535 | return ttm_mem_global_alloc_zone(glob, NULL, memory, no_wait, |
534 | interruptible); | 536 | interruptible); |
535 | } | 537 | } |
538 | EXPORT_SYMBOL(ttm_mem_global_alloc); | ||
536 | 539 | ||
537 | int ttm_mem_global_alloc_page(struct ttm_mem_global *glob, | 540 | int ttm_mem_global_alloc_page(struct ttm_mem_global *glob, |
538 | struct page *page, | 541 | struct page *page, |
@@ -588,3 +591,4 @@ size_t ttm_round_pot(size_t size) | |||
588 | } | 591 | } |
589 | return 0; | 592 | return 0; |
590 | } | 593 | } |
594 | EXPORT_SYMBOL(ttm_round_pot); | ||
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c new file mode 100644 index 000000000000..1099abac824b --- /dev/null +++ b/drivers/gpu/drm/ttm/ttm_object.c | |||
@@ -0,0 +1,452 @@ | |||
1 | /************************************************************************** | ||
2 | * | ||
3 | * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA | ||
4 | * All Rights Reserved. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the | ||
8 | * "Software"), to deal in the Software without restriction, including | ||
9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
10 | * distribute, sub license, and/or sell copies of the Software, and to | ||
11 | * permit persons to whom the Software is furnished to do so, subject to | ||
12 | * the following conditions: | ||
13 | * | ||
14 | * The above copyright notice and this permission notice (including the | ||
15 | * next paragraph) shall be included in all copies or substantial portions | ||
16 | * of the Software. | ||
17 | * | ||
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
25 | * | ||
26 | **************************************************************************/ | ||
27 | /* | ||
28 | * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> | ||
29 | */ | ||
30 | /** @file ttm_ref_object.c | ||
31 | * | ||
32 | * Base- and reference object implementation for the various | ||
33 | * ttm objects. Implements reference counting, minimal security checks | ||
34 | * and release on file close. | ||
35 | */ | ||
36 | |||
37 | /** | ||
38 | * struct ttm_object_file | ||
39 | * | ||
40 | * @tdev: Pointer to the ttm_object_device. | ||
41 | * | ||
42 | * @lock: Lock that protects the ref_list list and the | ||
43 | * ref_hash hash tables. | ||
44 | * | ||
45 | * @ref_list: List of ttm_ref_objects to be destroyed at | ||
46 | * file release. | ||
47 | * | ||
48 | * @ref_hash: Hash tables of ref objects, one per ttm_ref_type, | ||
49 | * for fast lookup of ref objects given a base object. | ||
50 | */ | ||
51 | |||
52 | #include "ttm/ttm_object.h" | ||
53 | #include "ttm/ttm_module.h" | ||
54 | #include <linux/list.h> | ||
55 | #include <linux/spinlock.h> | ||
56 | #include <linux/slab.h> | ||
57 | #include <linux/module.h> | ||
58 | #include <asm/atomic.h> | ||
59 | |||
60 | struct ttm_object_file { | ||
61 | struct ttm_object_device *tdev; | ||
62 | rwlock_t lock; | ||
63 | struct list_head ref_list; | ||
64 | struct drm_open_hash ref_hash[TTM_REF_NUM]; | ||
65 | struct kref refcount; | ||
66 | }; | ||
67 | |||
68 | /** | ||
69 | * struct ttm_object_device | ||
70 | * | ||
71 | * @object_lock: lock that protects the object_hash hash table. | ||
72 | * | ||
73 | * @object_hash: hash table for fast lookup of object global names. | ||
74 | * | ||
75 | * @object_count: Per device object count. | ||
76 | * | ||
77 | * This is the per-device data structure needed for ttm object management. | ||
78 | */ | ||
79 | |||
80 | struct ttm_object_device { | ||
81 | rwlock_t object_lock; | ||
82 | struct drm_open_hash object_hash; | ||
83 | atomic_t object_count; | ||
84 | struct ttm_mem_global *mem_glob; | ||
85 | }; | ||
86 | |||
87 | /** | ||
88 | * struct ttm_ref_object | ||
89 | * | ||
90 | * @hash: Hash entry for the per-file object reference hash. | ||
91 | * | ||
92 | * @head: List entry for the per-file list of ref-objects. | ||
93 | * | ||
94 | * @kref: Ref count. | ||
95 | * | ||
96 | * @obj: Base object this ref object is referencing. | ||
97 | * | ||
98 | * @ref_type: Type of ref object. | ||
99 | * | ||
100 | * This is similar to an idr object, but it also has a hash table entry | ||
101 | * that allows lookup with a pointer to the referenced object as a key. In | ||
102 | * that way, one can easily detect whether a base object is referenced by | ||
103 | * a particular ttm_object_file. It also carries a ref count to avoid creating | ||
104 | * multiple ref objects if a ttm_object_file references the same base | ||
105 | * object more than once. | ||
106 | */ | ||
107 | |||
108 | struct ttm_ref_object { | ||
109 | struct drm_hash_item hash; | ||
110 | struct list_head head; | ||
111 | struct kref kref; | ||
112 | struct ttm_base_object *obj; | ||
113 | enum ttm_ref_type ref_type; | ||
114 | struct ttm_object_file *tfile; | ||
115 | }; | ||
116 | |||
117 | static inline struct ttm_object_file * | ||
118 | ttm_object_file_ref(struct ttm_object_file *tfile) | ||
119 | { | ||
120 | kref_get(&tfile->refcount); | ||
121 | return tfile; | ||
122 | } | ||
123 | |||
124 | static void ttm_object_file_destroy(struct kref *kref) | ||
125 | { | ||
126 | struct ttm_object_file *tfile = | ||
127 | container_of(kref, struct ttm_object_file, refcount); | ||
128 | |||
129 | kfree(tfile); | ||
130 | } | ||
131 | |||
132 | |||
133 | static inline void ttm_object_file_unref(struct ttm_object_file **p_tfile) | ||
134 | { | ||
135 | struct ttm_object_file *tfile = *p_tfile; | ||
136 | |||
137 | *p_tfile = NULL; | ||
138 | kref_put(&tfile->refcount, ttm_object_file_destroy); | ||
139 | } | ||
140 | |||
141 | |||
142 | int ttm_base_object_init(struct ttm_object_file *tfile, | ||
143 | struct ttm_base_object *base, | ||
144 | bool shareable, | ||
145 | enum ttm_object_type object_type, | ||
146 | void (*refcount_release) (struct ttm_base_object **), | ||
147 | void (*ref_obj_release) (struct ttm_base_object *, | ||
148 | enum ttm_ref_type ref_type)) | ||
149 | { | ||
150 | struct ttm_object_device *tdev = tfile->tdev; | ||
151 | int ret; | ||
152 | |||
153 | base->shareable = shareable; | ||
154 | base->tfile = ttm_object_file_ref(tfile); | ||
155 | base->refcount_release = refcount_release; | ||
156 | base->ref_obj_release = ref_obj_release; | ||
157 | base->object_type = object_type; | ||
158 | write_lock(&tdev->object_lock); | ||
159 | kref_init(&base->refcount); | ||
160 | ret = drm_ht_just_insert_please(&tdev->object_hash, | ||
161 | &base->hash, | ||
162 | (unsigned long)base, 31, 0, 0); | ||
163 | write_unlock(&tdev->object_lock); | ||
164 | if (unlikely(ret != 0)) | ||
165 | goto out_err0; | ||
166 | |||
167 | ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL); | ||
168 | if (unlikely(ret != 0)) | ||
169 | goto out_err1; | ||
170 | |||
171 | ttm_base_object_unref(&base); | ||
172 | |||
173 | return 0; | ||
174 | out_err1: | ||
175 | (void)drm_ht_remove_item(&tdev->object_hash, &base->hash); | ||
176 | out_err0: | ||
177 | return ret; | ||
178 | } | ||
179 | EXPORT_SYMBOL(ttm_base_object_init); | ||
180 | |||
181 | static void ttm_release_base(struct kref *kref) | ||
182 | { | ||
183 | struct ttm_base_object *base = | ||
184 | container_of(kref, struct ttm_base_object, refcount); | ||
185 | struct ttm_object_device *tdev = base->tfile->tdev; | ||
186 | |||
187 | (void)drm_ht_remove_item(&tdev->object_hash, &base->hash); | ||
188 | write_unlock(&tdev->object_lock); | ||
189 | if (base->refcount_release) { | ||
190 | ttm_object_file_unref(&base->tfile); | ||
191 | base->refcount_release(&base); | ||
192 | } | ||
193 | write_lock(&tdev->object_lock); | ||
194 | } | ||
195 | |||
196 | void ttm_base_object_unref(struct ttm_base_object **p_base) | ||
197 | { | ||
198 | struct ttm_base_object *base = *p_base; | ||
199 | struct ttm_object_device *tdev = base->tfile->tdev; | ||
200 | |||
201 | *p_base = NULL; | ||
202 | |||
203 | /* | ||
204 | * Need to take the lock here to avoid racing with | ||
205 | * users trying to look up the object. | ||
206 | */ | ||
207 | |||
208 | write_lock(&tdev->object_lock); | ||
209 | (void)kref_put(&base->refcount, &ttm_release_base); | ||
210 | write_unlock(&tdev->object_lock); | ||
211 | } | ||
212 | EXPORT_SYMBOL(ttm_base_object_unref); | ||
213 | |||
214 | struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile, | ||
215 | uint32_t key) | ||
216 | { | ||
217 | struct ttm_object_device *tdev = tfile->tdev; | ||
218 | struct ttm_base_object *base; | ||
219 | struct drm_hash_item *hash; | ||
220 | int ret; | ||
221 | |||
222 | read_lock(&tdev->object_lock); | ||
223 | ret = drm_ht_find_item(&tdev->object_hash, key, &hash); | ||
224 | |||
225 | if (likely(ret == 0)) { | ||
226 | base = drm_hash_entry(hash, struct ttm_base_object, hash); | ||
227 | kref_get(&base->refcount); | ||
228 | } | ||
229 | read_unlock(&tdev->object_lock); | ||
230 | |||
231 | if (unlikely(ret != 0)) | ||
232 | return NULL; | ||
233 | |||
234 | if (tfile != base->tfile && !base->shareable) { | ||
235 | printk(KERN_ERR TTM_PFX | ||
236 | "Attempted access of non-shareable object.\n"); | ||
237 | ttm_base_object_unref(&base); | ||
238 | return NULL; | ||
239 | } | ||
240 | |||
241 | return base; | ||
242 | } | ||
243 | EXPORT_SYMBOL(ttm_base_object_lookup); | ||
244 | |||
245 | int ttm_ref_object_add(struct ttm_object_file *tfile, | ||
246 | struct ttm_base_object *base, | ||
247 | enum ttm_ref_type ref_type, bool *existed) | ||
248 | { | ||
249 | struct drm_open_hash *ht = &tfile->ref_hash[ref_type]; | ||
250 | struct ttm_ref_object *ref; | ||
251 | struct drm_hash_item *hash; | ||
252 | struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob; | ||
253 | int ret = -EINVAL; | ||
254 | |||
255 | if (existed != NULL) | ||
256 | *existed = true; | ||
257 | |||
258 | while (ret == -EINVAL) { | ||
259 | read_lock(&tfile->lock); | ||
260 | ret = drm_ht_find_item(ht, base->hash.key, &hash); | ||
261 | |||
262 | if (ret == 0) { | ||
263 | ref = drm_hash_entry(hash, struct ttm_ref_object, hash); | ||
264 | kref_get(&ref->kref); | ||
265 | read_unlock(&tfile->lock); | ||
266 | break; | ||
267 | } | ||
268 | |||
269 | read_unlock(&tfile->lock); | ||
270 | ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref), | ||
271 | false, false); | ||
272 | if (unlikely(ret != 0)) | ||
273 | return ret; | ||
274 | ref = kmalloc(sizeof(*ref), GFP_KERNEL); | ||
275 | if (unlikely(ref == NULL)) { | ||
276 | ttm_mem_global_free(mem_glob, sizeof(*ref)); | ||
277 | return -ENOMEM; | ||
278 | } | ||
279 | |||
280 | ref->hash.key = base->hash.key; | ||
281 | ref->obj = base; | ||
282 | ref->tfile = tfile; | ||
283 | ref->ref_type = ref_type; | ||
284 | kref_init(&ref->kref); | ||
285 | |||
286 | write_lock(&tfile->lock); | ||
287 | ret = drm_ht_insert_item(ht, &ref->hash); | ||
288 | |||
289 | if (likely(ret == 0)) { | ||
290 | list_add_tail(&ref->head, &tfile->ref_list); | ||
291 | kref_get(&base->refcount); | ||
292 | write_unlock(&tfile->lock); | ||
293 | if (existed != NULL) | ||
294 | *existed = false; | ||
295 | break; | ||
296 | } | ||
297 | |||
298 | write_unlock(&tfile->lock); | ||
299 | BUG_ON(ret != -EINVAL); | ||
300 | |||
301 | ttm_mem_global_free(mem_glob, sizeof(*ref)); | ||
302 | kfree(ref); | ||
303 | } | ||
304 | |||
305 | return ret; | ||
306 | } | ||
307 | EXPORT_SYMBOL(ttm_ref_object_add); | ||
308 | |||
/*
 * Final kref release function for a ttm_ref_object.
 *
 * Invoked with tfile->lock held for writing (via kref_put() in
 * ttm_ref_object_base_unref(), or called directly from
 * ttm_object_file_release()).  The ref is first unhashed and unlinked
 * under the lock; the lock is then dropped before the driver's
 * ref_obj_release callback and the base-object unref run
 * (NOTE(review): presumably because those paths may sleep or take
 * other locks — confirm against callback implementations), and
 * re-acquired before returning so the caller's locking state is
 * preserved.
 */
static void ttm_ref_object_release(struct kref *kref)
{
	struct ttm_ref_object *ref =
	    container_of(kref, struct ttm_ref_object, kref);
	struct ttm_base_object *base = ref->obj;
	struct ttm_object_file *tfile = ref->tfile;
	struct drm_open_hash *ht;
	struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;

	/* Remove from the per-file, per-type hash and the file's ref list. */
	ht = &tfile->ref_hash[ref->ref_type];
	(void)drm_ht_remove_item(ht, &ref->hash);
	list_del(&ref->head);
	write_unlock(&tfile->lock);

	/* Plain usage refs carry no driver-specific release action. */
	if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
		base->ref_obj_release(base, ref->ref_type);

	ttm_base_object_unref(&ref->obj);
	/* Return the accounting charge taken when the ref was added. */
	ttm_mem_global_free(mem_glob, sizeof(*ref));
	kfree(ref);
	write_lock(&tfile->lock);
}
331 | |||
332 | int ttm_ref_object_base_unref(struct ttm_object_file *tfile, | ||
333 | unsigned long key, enum ttm_ref_type ref_type) | ||
334 | { | ||
335 | struct drm_open_hash *ht = &tfile->ref_hash[ref_type]; | ||
336 | struct ttm_ref_object *ref; | ||
337 | struct drm_hash_item *hash; | ||
338 | int ret; | ||
339 | |||
340 | write_lock(&tfile->lock); | ||
341 | ret = drm_ht_find_item(ht, key, &hash); | ||
342 | if (unlikely(ret != 0)) { | ||
343 | write_unlock(&tfile->lock); | ||
344 | return -EINVAL; | ||
345 | } | ||
346 | ref = drm_hash_entry(hash, struct ttm_ref_object, hash); | ||
347 | kref_put(&ref->kref, ttm_ref_object_release); | ||
348 | write_unlock(&tfile->lock); | ||
349 | return 0; | ||
350 | } | ||
351 | EXPORT_SYMBOL(ttm_ref_object_base_unref); | ||
352 | |||
/*
 * Tear down a per-client object file: release every ref object still
 * held, destroy the per-type ref hash tables, and drop the caller's
 * reference on the tfile.  *p_tfile is cleared first so the caller
 * cannot reuse the stale pointer.
 */
void ttm_object_file_release(struct ttm_object_file **p_tfile)
{
	struct ttm_ref_object *ref;
	struct list_head *list;
	unsigned int i;
	struct ttm_object_file *tfile = *p_tfile;

	*p_tfile = NULL;
	write_lock(&tfile->lock);

	/*
	 * Since we release the lock within the loop, we have to
	 * restart it from the beginning each time.
	 */

	while (!list_empty(&tfile->ref_list)) {
		list = tfile->ref_list.next;
		ref = list_entry(list, struct ttm_ref_object, head);
		/*
		 * Destroy the ref unconditionally, bypassing kref_put():
		 * the file is going away, so any remaining count is moot.
		 */
		ttm_ref_object_release(&ref->kref);
	}

	for (i = 0; i < TTM_REF_NUM; ++i)
		drm_ht_remove(&tfile->ref_hash[i]);

	write_unlock(&tfile->lock);
	ttm_object_file_unref(&tfile);
}
EXPORT_SYMBOL(ttm_object_file_release);
381 | |||
382 | struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev, | ||
383 | unsigned int hash_order) | ||
384 | { | ||
385 | struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL); | ||
386 | unsigned int i; | ||
387 | unsigned int j = 0; | ||
388 | int ret; | ||
389 | |||
390 | if (unlikely(tfile == NULL)) | ||
391 | return NULL; | ||
392 | |||
393 | rwlock_init(&tfile->lock); | ||
394 | tfile->tdev = tdev; | ||
395 | kref_init(&tfile->refcount); | ||
396 | INIT_LIST_HEAD(&tfile->ref_list); | ||
397 | |||
398 | for (i = 0; i < TTM_REF_NUM; ++i) { | ||
399 | ret = drm_ht_create(&tfile->ref_hash[i], hash_order); | ||
400 | if (ret) { | ||
401 | j = i; | ||
402 | goto out_err; | ||
403 | } | ||
404 | } | ||
405 | |||
406 | return tfile; | ||
407 | out_err: | ||
408 | for (i = 0; i < j; ++i) | ||
409 | drm_ht_remove(&tfile->ref_hash[i]); | ||
410 | |||
411 | kfree(tfile); | ||
412 | |||
413 | return NULL; | ||
414 | } | ||
415 | EXPORT_SYMBOL(ttm_object_file_init); | ||
416 | |||
417 | struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global | ||
418 | *mem_glob, | ||
419 | unsigned int hash_order) | ||
420 | { | ||
421 | struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL); | ||
422 | int ret; | ||
423 | |||
424 | if (unlikely(tdev == NULL)) | ||
425 | return NULL; | ||
426 | |||
427 | tdev->mem_glob = mem_glob; | ||
428 | rwlock_init(&tdev->object_lock); | ||
429 | atomic_set(&tdev->object_count, 0); | ||
430 | ret = drm_ht_create(&tdev->object_hash, hash_order); | ||
431 | |||
432 | if (likely(ret == 0)) | ||
433 | return tdev; | ||
434 | |||
435 | kfree(tdev); | ||
436 | return NULL; | ||
437 | } | ||
438 | EXPORT_SYMBOL(ttm_object_device_init); | ||
439 | |||
440 | void ttm_object_device_release(struct ttm_object_device **p_tdev) | ||
441 | { | ||
442 | struct ttm_object_device *tdev = *p_tdev; | ||
443 | |||
444 | *p_tdev = NULL; | ||
445 | |||
446 | write_lock(&tdev->object_lock); | ||
447 | drm_ht_remove(&tdev->object_hash); | ||
448 | write_unlock(&tdev->object_lock); | ||
449 | |||
450 | kfree(tdev); | ||
451 | } | ||
452 | EXPORT_SYMBOL(ttm_object_device_release); | ||
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index 7bcb89f39ce8..9c2b1cc5dba5 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c | |||
@@ -192,6 +192,7 @@ int ttm_tt_populate(struct ttm_tt *ttm) | |||
192 | ttm->state = tt_unbound; | 192 | ttm->state = tt_unbound; |
193 | return 0; | 193 | return 0; |
194 | } | 194 | } |
195 | EXPORT_SYMBOL(ttm_tt_populate); | ||
195 | 196 | ||
196 | #ifdef CONFIG_X86 | 197 | #ifdef CONFIG_X86 |
197 | static inline int ttm_tt_set_page_caching(struct page *p, | 198 | static inline int ttm_tt_set_page_caching(struct page *p, |