diff options
| author | Dave Airlie <airlied@redhat.com> | 2009-12-07 22:52:41 -0500 |
|---|---|---|
| committer | Dave Airlie <airlied@redhat.com> | 2009-12-07 22:52:41 -0500 |
| commit | 1bd049fa895f9c6743f38b52ce14775f5a31ea63 (patch) | |
| tree | cb9163ac1c20f7fbdbde42eaab8013d0c3734aed | |
| parent | 22763c5cf3690a681551162c15d34d935308c8d7 (diff) | |
| parent | b0a007dc27d8d3ff3db07b3ea997323d9330f770 (diff) | |
Merge branch 'drm-core-next' into drm-linus
Bring all core drm changes into 2.6.32 tree and resolve
the conflict that occurs.
Conflicts:
drivers/gpu/drm/drm_fb_helper.c
39 files changed, 2509 insertions, 289 deletions
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 3c8827a7aabd..91567ac806f1 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile | |||
| @@ -15,7 +15,7 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \ | |||
| 15 | 15 | ||
| 16 | drm-$(CONFIG_COMPAT) += drm_ioc32.o | 16 | drm-$(CONFIG_COMPAT) += drm_ioc32.o |
| 17 | 17 | ||
| 18 | drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o | 18 | drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_i2c_helper.o |
| 19 | 19 | ||
| 20 | obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o | 20 | obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o |
| 21 | 21 | ||
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 5cae0b3eee9b..4fe321dc900c 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c | |||
| @@ -125,6 +125,15 @@ static struct drm_prop_enum_list drm_tv_subconnector_enum_list[] = | |||
| 125 | DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name, | 125 | DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name, |
| 126 | drm_tv_subconnector_enum_list) | 126 | drm_tv_subconnector_enum_list) |
| 127 | 127 | ||
| 128 | static struct drm_prop_enum_list drm_dirty_info_enum_list[] = { | ||
| 129 | { DRM_MODE_DIRTY_OFF, "Off" }, | ||
| 130 | { DRM_MODE_DIRTY_ON, "On" }, | ||
| 131 | { DRM_MODE_DIRTY_ANNOTATE, "Annotate" }, | ||
| 132 | }; | ||
| 133 | |||
| 134 | DRM_ENUM_NAME_FN(drm_get_dirty_info_name, | ||
| 135 | drm_dirty_info_enum_list) | ||
| 136 | |||
| 128 | struct drm_conn_prop_enum_list { | 137 | struct drm_conn_prop_enum_list { |
| 129 | int type; | 138 | int type; |
| 130 | char *name; | 139 | char *name; |
| @@ -802,6 +811,36 @@ int drm_mode_create_dithering_property(struct drm_device *dev) | |||
| 802 | EXPORT_SYMBOL(drm_mode_create_dithering_property); | 811 | EXPORT_SYMBOL(drm_mode_create_dithering_property); |
| 803 | 812 | ||
| 804 | /** | 813 | /** |
| 814 | * drm_mode_create_dirty_property - create dirty property | ||
| 815 | * @dev: DRM device | ||
| 816 | * | ||
| 817 | * Called by a driver the first time it's needed, must be attached to desired | ||
| 818 | * connectors. | ||
| 819 | */ | ||
| 820 | int drm_mode_create_dirty_info_property(struct drm_device *dev) | ||
| 821 | { | ||
| 822 | struct drm_property *dirty_info; | ||
| 823 | int i; | ||
| 824 | |||
| 825 | if (dev->mode_config.dirty_info_property) | ||
| 826 | return 0; | ||
| 827 | |||
| 828 | dirty_info = | ||
| 829 | drm_property_create(dev, DRM_MODE_PROP_ENUM | | ||
| 830 | DRM_MODE_PROP_IMMUTABLE, | ||
| 831 | "dirty", | ||
| 832 | ARRAY_SIZE(drm_dirty_info_enum_list)); | ||
| 833 | for (i = 0; i < ARRAY_SIZE(drm_dirty_info_enum_list); i++) | ||
| 834 | drm_property_add_enum(dirty_info, i, | ||
| 835 | drm_dirty_info_enum_list[i].type, | ||
| 836 | drm_dirty_info_enum_list[i].name); | ||
| 837 | dev->mode_config.dirty_info_property = dirty_info; | ||
| 838 | |||
| 839 | return 0; | ||
| 840 | } | ||
| 841 | EXPORT_SYMBOL(drm_mode_create_dirty_info_property); | ||
| 842 | |||
| 843 | /** | ||
| 805 | * drm_mode_config_init - initialize DRM mode_configuration structure | 844 | * drm_mode_config_init - initialize DRM mode_configuration structure |
| 806 | * @dev: DRM device | 845 | * @dev: DRM device |
| 807 | * | 846 | * |
| @@ -1753,6 +1792,71 @@ out: | |||
| 1753 | return ret; | 1792 | return ret; |
| 1754 | } | 1793 | } |
| 1755 | 1794 | ||
| 1795 | int drm_mode_dirtyfb_ioctl(struct drm_device *dev, | ||
| 1796 | void *data, struct drm_file *file_priv) | ||
| 1797 | { | ||
| 1798 | struct drm_clip_rect __user *clips_ptr; | ||
| 1799 | struct drm_clip_rect *clips = NULL; | ||
| 1800 | struct drm_mode_fb_dirty_cmd *r = data; | ||
| 1801 | struct drm_mode_object *obj; | ||
| 1802 | struct drm_framebuffer *fb; | ||
| 1803 | unsigned flags; | ||
| 1804 | int num_clips; | ||
| 1805 | int ret = 0; | ||
| 1806 | |||
| 1807 | mutex_lock(&dev->mode_config.mutex); | ||
| 1808 | obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB); | ||
| 1809 | if (!obj) { | ||
| 1810 | DRM_ERROR("invalid framebuffer id\n"); | ||
| 1811 | ret = -EINVAL; | ||
| 1812 | goto out_err1; | ||
| 1813 | } | ||
| 1814 | fb = obj_to_fb(obj); | ||
| 1815 | |||
| 1816 | num_clips = r->num_clips; | ||
| 1817 | clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr; | ||
| 1818 | |||
| 1819 | if (!num_clips != !clips_ptr) { | ||
| 1820 | ret = -EINVAL; | ||
| 1821 | goto out_err1; | ||
| 1822 | } | ||
| 1823 | |||
| 1824 | flags = DRM_MODE_FB_DIRTY_FLAGS & r->flags; | ||
| 1825 | |||
| 1826 | /* If userspace annotates copy, clips must come in pairs */ | ||
| 1827 | if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY && (num_clips % 2)) { | ||
| 1828 | ret = -EINVAL; | ||
| 1829 | goto out_err1; | ||
| 1830 | } | ||
| 1831 | |||
| 1832 | if (num_clips && clips_ptr) { | ||
| 1833 | clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL); | ||
| 1834 | if (!clips) { | ||
| 1835 | ret = -ENOMEM; | ||
| 1836 | goto out_err1; | ||
| 1837 | } | ||
| 1838 | |||
| 1839 | ret = copy_from_user(clips, clips_ptr, | ||
| 1840 | num_clips * sizeof(*clips)); | ||
| 1841 | if (ret) | ||
| 1842 | goto out_err2; | ||
| 1843 | } | ||
| 1844 | |||
| 1845 | if (fb->funcs->dirty) { | ||
| 1846 | ret = fb->funcs->dirty(fb, flags, r->color, clips, num_clips); | ||
| 1847 | } else { | ||
| 1848 | ret = -ENOSYS; | ||
| 1849 | goto out_err2; | ||
| 1850 | } | ||
| 1851 | |||
| 1852 | out_err2: | ||
| 1853 | kfree(clips); | ||
| 1854 | out_err1: | ||
| 1855 | mutex_unlock(&dev->mode_config.mutex); | ||
| 1856 | return ret; | ||
| 1857 | } | ||
| 1858 | |||
| 1859 | |||
| 1756 | /** | 1860 | /** |
| 1757 | * drm_fb_release - remove and free the FBs on this file | 1861 | * drm_fb_release - remove and free the FBs on this file |
| 1758 | * @filp: file * from the ioctl | 1862 | * @filp: file * from the ioctl |
| @@ -2478,3 +2582,72 @@ out: | |||
| 2478 | mutex_unlock(&dev->mode_config.mutex); | 2582 | mutex_unlock(&dev->mode_config.mutex); |
| 2479 | return ret; | 2583 | return ret; |
| 2480 | } | 2584 | } |
| 2585 | |||
| 2586 | int drm_mode_page_flip_ioctl(struct drm_device *dev, | ||
| 2587 | void *data, struct drm_file *file_priv) | ||
| 2588 | { | ||
| 2589 | struct drm_mode_crtc_page_flip *page_flip = data; | ||
| 2590 | struct drm_mode_object *obj; | ||
| 2591 | struct drm_crtc *crtc; | ||
| 2592 | struct drm_framebuffer *fb; | ||
| 2593 | struct drm_pending_vblank_event *e = NULL; | ||
| 2594 | unsigned long flags; | ||
| 2595 | int ret = -EINVAL; | ||
| 2596 | |||
| 2597 | if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS || | ||
| 2598 | page_flip->reserved != 0) | ||
| 2599 | return -EINVAL; | ||
| 2600 | |||
| 2601 | mutex_lock(&dev->mode_config.mutex); | ||
| 2602 | obj = drm_mode_object_find(dev, page_flip->crtc_id, DRM_MODE_OBJECT_CRTC); | ||
| 2603 | if (!obj) | ||
| 2604 | goto out; | ||
| 2605 | crtc = obj_to_crtc(obj); | ||
| 2606 | |||
| 2607 | if (crtc->funcs->page_flip == NULL) | ||
| 2608 | goto out; | ||
| 2609 | |||
| 2610 | obj = drm_mode_object_find(dev, page_flip->fb_id, DRM_MODE_OBJECT_FB); | ||
| 2611 | if (!obj) | ||
| 2612 | goto out; | ||
| 2613 | fb = obj_to_fb(obj); | ||
| 2614 | |||
| 2615 | if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) { | ||
| 2616 | ret = -ENOMEM; | ||
| 2617 | spin_lock_irqsave(&dev->event_lock, flags); | ||
| 2618 | if (file_priv->event_space < sizeof e->event) { | ||
| 2619 | spin_unlock_irqrestore(&dev->event_lock, flags); | ||
| 2620 | goto out; | ||
| 2621 | } | ||
| 2622 | file_priv->event_space -= sizeof e->event; | ||
| 2623 | spin_unlock_irqrestore(&dev->event_lock, flags); | ||
| 2624 | |||
| 2625 | e = kzalloc(sizeof *e, GFP_KERNEL); | ||
| 2626 | if (e == NULL) { | ||
| 2627 | spin_lock_irqsave(&dev->event_lock, flags); | ||
| 2628 | file_priv->event_space += sizeof e->event; | ||
| 2629 | spin_unlock_irqrestore(&dev->event_lock, flags); | ||
| 2630 | goto out; | ||
| 2631 | } | ||
| 2632 | |||
| 2633 | e->event.base.type = DRM_EVENT_VBLANK; | ||
| 2634 | e->event.base.length = sizeof e->event; | ||
| 2635 | e->event.user_data = page_flip->user_data; | ||
| 2636 | e->base.event = &e->event.base; | ||
| 2637 | e->base.file_priv = file_priv; | ||
| 2638 | e->base.destroy = | ||
| 2639 | (void (*) (struct drm_pending_event *)) kfree; | ||
| 2640 | } | ||
| 2641 | |||
| 2642 | ret = crtc->funcs->page_flip(crtc, fb, e); | ||
| 2643 | if (ret) { | ||
| 2644 | spin_lock_irqsave(&dev->event_lock, flags); | ||
| 2645 | file_priv->event_space += sizeof e->event; | ||
| 2646 | spin_unlock_irqrestore(&dev->event_lock, flags); | ||
| 2647 | kfree(e); | ||
| 2648 | } | ||
| 2649 | |||
| 2650 | out: | ||
| 2651 | mutex_unlock(&dev->mode_config.mutex); | ||
| 2652 | return ret; | ||
| 2653 | } | ||
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index bbfd110a7168..3963b3c1081a 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c | |||
| @@ -109,7 +109,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector, | |||
| 109 | 109 | ||
| 110 | count = (*connector_funcs->get_modes)(connector); | 110 | count = (*connector_funcs->get_modes)(connector); |
| 111 | if (!count) { | 111 | if (!count) { |
| 112 | count = drm_add_modes_noedid(connector, 800, 600); | 112 | count = drm_add_modes_noedid(connector, 1024, 768); |
| 113 | if (!count) | 113 | if (!count) |
| 114 | return 0; | 114 | return 0; |
| 115 | } | 115 | } |
diff --git a/drivers/gpu/drm/i915/intel_dp_i2c.c b/drivers/gpu/drm/drm_dp_i2c_helper.c index a63b6f57d2d4..f1c7c856e9db 100644 --- a/drivers/gpu/drm/i915/intel_dp_i2c.c +++ b/drivers/gpu/drm/drm_dp_i2c_helper.c | |||
| @@ -28,84 +28,20 @@ | |||
| 28 | #include <linux/errno.h> | 28 | #include <linux/errno.h> |
| 29 | #include <linux/sched.h> | 29 | #include <linux/sched.h> |
| 30 | #include <linux/i2c.h> | 30 | #include <linux/i2c.h> |
| 31 | #include "intel_dp.h" | 31 | #include "drm_dp_helper.h" |
| 32 | #include "drmP.h" | 32 | #include "drmP.h" |
| 33 | 33 | ||
| 34 | /* Run a single AUX_CH I2C transaction, writing/reading data as necessary */ | 34 | /* Run a single AUX_CH I2C transaction, writing/reading data as necessary */ |
| 35 | |||
| 36 | #define MODE_I2C_START 1 | ||
| 37 | #define MODE_I2C_WRITE 2 | ||
| 38 | #define MODE_I2C_READ 4 | ||
| 39 | #define MODE_I2C_STOP 8 | ||
| 40 | |||
| 41 | static int | 35 | static int |
| 42 | i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode, | 36 | i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode, |
| 43 | uint8_t write_byte, uint8_t *read_byte) | 37 | uint8_t write_byte, uint8_t *read_byte) |
| 44 | { | 38 | { |
| 45 | struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; | 39 | struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; |
| 46 | uint16_t address = algo_data->address; | ||
| 47 | uint8_t msg[5]; | ||
| 48 | uint8_t reply[2]; | ||
| 49 | int msg_bytes; | ||
| 50 | int reply_bytes; | ||
| 51 | int ret; | 40 | int ret; |
| 52 | 41 | ||
| 53 | /* Set up the command byte */ | 42 | ret = (*algo_data->aux_ch)(adapter, mode, |
| 54 | if (mode & MODE_I2C_READ) | 43 | write_byte, read_byte); |
| 55 | msg[0] = AUX_I2C_READ << 4; | 44 | return ret; |
| 56 | else | ||
| 57 | msg[0] = AUX_I2C_WRITE << 4; | ||
| 58 | |||
| 59 | if (!(mode & MODE_I2C_STOP)) | ||
| 60 | msg[0] |= AUX_I2C_MOT << 4; | ||
| 61 | |||
| 62 | msg[1] = address >> 8; | ||
| 63 | msg[2] = address; | ||
| 64 | |||
| 65 | switch (mode) { | ||
| 66 | case MODE_I2C_WRITE: | ||
| 67 | msg[3] = 0; | ||
| 68 | msg[4] = write_byte; | ||
| 69 | msg_bytes = 5; | ||
| 70 | reply_bytes = 1; | ||
| 71 | break; | ||
| 72 | case MODE_I2C_READ: | ||
| 73 | msg[3] = 0; | ||
| 74 | msg_bytes = 4; | ||
| 75 | reply_bytes = 2; | ||
| 76 | break; | ||
| 77 | default: | ||
| 78 | msg_bytes = 3; | ||
| 79 | reply_bytes = 1; | ||
| 80 | break; | ||
| 81 | } | ||
| 82 | |||
| 83 | for (;;) { | ||
| 84 | ret = (*algo_data->aux_ch)(adapter, | ||
| 85 | msg, msg_bytes, | ||
| 86 | reply, reply_bytes); | ||
| 87 | if (ret < 0) { | ||
| 88 | DRM_DEBUG("aux_ch failed %d\n", ret); | ||
| 89 | return ret; | ||
| 90 | } | ||
| 91 | switch (reply[0] & AUX_I2C_REPLY_MASK) { | ||
| 92 | case AUX_I2C_REPLY_ACK: | ||
| 93 | if (mode == MODE_I2C_READ) { | ||
| 94 | *read_byte = reply[1]; | ||
| 95 | } | ||
| 96 | return reply_bytes - 1; | ||
| 97 | case AUX_I2C_REPLY_NACK: | ||
| 98 | DRM_DEBUG("aux_ch nack\n"); | ||
| 99 | return -EREMOTEIO; | ||
| 100 | case AUX_I2C_REPLY_DEFER: | ||
| 101 | DRM_DEBUG("aux_ch defer\n"); | ||
| 102 | udelay(100); | ||
| 103 | break; | ||
| 104 | default: | ||
| 105 | DRM_ERROR("aux_ch invalid reply 0x%02x\n", reply[0]); | ||
| 106 | return -EREMOTEIO; | ||
| 107 | } | ||
| 108 | } | ||
| 109 | } | 45 | } |
| 110 | 46 | ||
| 111 | /* | 47 | /* |
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index a75ca63deea6..ff2f1042cb44 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c | |||
| @@ -145,6 +145,8 @@ static struct drm_ioctl_desc drm_ioctls[] = { | |||
| 145 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW), | 145 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW), |
| 146 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW), | 146 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW), |
| 147 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW), | 147 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW), |
| 148 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), | ||
| 149 | DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW) | ||
| 148 | }; | 150 | }; |
| 149 | 151 | ||
| 150 | #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) | 152 | #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) |
| @@ -366,6 +368,29 @@ module_init(drm_core_init); | |||
| 366 | module_exit(drm_core_exit); | 368 | module_exit(drm_core_exit); |
| 367 | 369 | ||
| 368 | /** | 370 | /** |
| 371 | * Copy an IOCTL return string to user space | ||
| 372 | */ | ||
| 373 | static int drm_copy_field(char *buf, size_t *buf_len, const char *value) | ||
| 374 | { | ||
| 375 | int len; | ||
| 376 | |||
| 377 | /* don't overflow userbuf */ | ||
| 378 | len = strlen(value); | ||
| 379 | if (len > *buf_len) | ||
| 380 | len = *buf_len; | ||
| 381 | |||
| 382 | /* let userspace know exact length of driver value (which could be | ||
| 383 | * larger than the userspace-supplied buffer) */ | ||
| 384 | *buf_len = strlen(value); | ||
| 385 | |||
| 386 | /* finally, try filling in the userbuf */ | ||
| 387 | if (len && buf) | ||
| 388 | if (copy_to_user(buf, value, len)) | ||
| 389 | return -EFAULT; | ||
| 390 | return 0; | ||
| 391 | } | ||
| 392 | |||
| 393 | /** | ||
| 369 | * Get version information | 394 | * Get version information |
| 370 | * | 395 | * |
| 371 | * \param inode device inode. | 396 | * \param inode device inode. |
| @@ -380,16 +405,21 @@ static int drm_version(struct drm_device *dev, void *data, | |||
| 380 | struct drm_file *file_priv) | 405 | struct drm_file *file_priv) |
| 381 | { | 406 | { |
| 382 | struct drm_version *version = data; | 407 | struct drm_version *version = data; |
| 383 | int len; | 408 | int err; |
| 384 | 409 | ||
| 385 | version->version_major = dev->driver->major; | 410 | version->version_major = dev->driver->major; |
| 386 | version->version_minor = dev->driver->minor; | 411 | version->version_minor = dev->driver->minor; |
| 387 | version->version_patchlevel = dev->driver->patchlevel; | 412 | version->version_patchlevel = dev->driver->patchlevel; |
| 388 | DRM_COPY(version->name, dev->driver->name); | 413 | err = drm_copy_field(version->name, &version->name_len, |
| 389 | DRM_COPY(version->date, dev->driver->date); | 414 | dev->driver->name); |
| 390 | DRM_COPY(version->desc, dev->driver->desc); | 415 | if (!err) |
| 391 | 416 | err = drm_copy_field(version->date, &version->date_len, | |
| 392 | return 0; | 417 | dev->driver->date); |
| 418 | if (!err) | ||
| 419 | err = drm_copy_field(version->desc, &version->desc_len, | ||
| 420 | dev->driver->desc); | ||
| 421 | |||
| 422 | return err; | ||
| 393 | } | 423 | } |
| 394 | 424 | ||
| 395 | /** | 425 | /** |
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index b54ba63d506e..c39b26f1abed 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c | |||
| @@ -123,18 +123,20 @@ static const u8 edid_header[] = { | |||
| 123 | */ | 123 | */ |
| 124 | static bool edid_is_valid(struct edid *edid) | 124 | static bool edid_is_valid(struct edid *edid) |
| 125 | { | 125 | { |
| 126 | int i; | 126 | int i, score = 0; |
| 127 | u8 csum = 0; | 127 | u8 csum = 0; |
| 128 | u8 *raw_edid = (u8 *)edid; | 128 | u8 *raw_edid = (u8 *)edid; |
| 129 | 129 | ||
| 130 | if (memcmp(edid->header, edid_header, sizeof(edid_header))) | 130 | for (i = 0; i < sizeof(edid_header); i++) |
| 131 | goto bad; | 131 | if (raw_edid[i] == edid_header[i]) |
| 132 | if (edid->version != 1) { | 132 | score++; |
| 133 | DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version); | 133 | |
| 134 | if (score == 8) ; | ||
| 135 | else if (score >= 6) { | ||
| 136 | DRM_DEBUG("Fixing EDID header, your hardware may be failing\n"); | ||
| 137 | memcpy(raw_edid, edid_header, sizeof(edid_header)); | ||
| 138 | } else | ||
| 134 | goto bad; | 139 | goto bad; |
| 135 | } | ||
| 136 | if (edid->revision > 4) | ||
| 137 | DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n"); | ||
| 138 | 140 | ||
| 139 | for (i = 0; i < EDID_LENGTH; i++) | 141 | for (i = 0; i < EDID_LENGTH; i++) |
| 140 | csum += raw_edid[i]; | 142 | csum += raw_edid[i]; |
| @@ -143,6 +145,14 @@ static bool edid_is_valid(struct edid *edid) | |||
| 143 | goto bad; | 145 | goto bad; |
| 144 | } | 146 | } |
| 145 | 147 | ||
| 148 | if (edid->version != 1) { | ||
| 149 | DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version); | ||
| 150 | goto bad; | ||
| 151 | } | ||
| 152 | |||
| 153 | if (edid->revision > 4) | ||
| 154 | DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n"); | ||
| 155 | |||
| 146 | return 1; | 156 | return 1; |
| 147 | 157 | ||
| 148 | bad: | 158 | bad: |
| @@ -481,16 +491,17 @@ static struct drm_display_mode drm_dmt_modes[] = { | |||
| 481 | 3048, 3536, 0, 1600, 1603, 1609, 1682, 0, | 491 | 3048, 3536, 0, 1600, 1603, 1609, 1682, 0, |
| 482 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, | 492 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
| 483 | }; | 493 | }; |
| 494 | static const int drm_num_dmt_modes = | ||
| 495 | sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode); | ||
| 484 | 496 | ||
| 485 | static struct drm_display_mode *drm_find_dmt(struct drm_device *dev, | 497 | static struct drm_display_mode *drm_find_dmt(struct drm_device *dev, |
| 486 | int hsize, int vsize, int fresh) | 498 | int hsize, int vsize, int fresh) |
| 487 | { | 499 | { |
| 488 | int i, count; | 500 | int i; |
| 489 | struct drm_display_mode *ptr, *mode; | 501 | struct drm_display_mode *ptr, *mode; |
| 490 | 502 | ||
| 491 | count = sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode); | ||
| 492 | mode = NULL; | 503 | mode = NULL; |
| 493 | for (i = 0; i < count; i++) { | 504 | for (i = 0; i < drm_num_dmt_modes; i++) { |
| 494 | ptr = &drm_dmt_modes[i]; | 505 | ptr = &drm_dmt_modes[i]; |
| 495 | if (hsize == ptr->hdisplay && | 506 | if (hsize == ptr->hdisplay && |
| 496 | vsize == ptr->vdisplay && | 507 | vsize == ptr->vdisplay && |
| @@ -834,8 +845,165 @@ static int add_standard_modes(struct drm_connector *connector, struct edid *edid | |||
| 834 | return modes; | 845 | return modes; |
| 835 | } | 846 | } |
| 836 | 847 | ||
| 848 | /* | ||
| 849 | * XXX fix this for: | ||
| 850 | * - GTF secondary curve formula | ||
| 851 | * - EDID 1.4 range offsets | ||
| 852 | * - CVT extended bits | ||
| 853 | */ | ||
| 854 | static bool | ||
| 855 | mode_in_range(struct drm_display_mode *mode, struct detailed_timing *timing) | ||
| 856 | { | ||
| 857 | struct detailed_data_monitor_range *range; | ||
| 858 | int hsync, vrefresh; | ||
| 859 | |||
| 860 | range = &timing->data.other_data.data.range; | ||
| 861 | |||
| 862 | hsync = drm_mode_hsync(mode); | ||
| 863 | vrefresh = drm_mode_vrefresh(mode); | ||
| 864 | |||
| 865 | if (hsync < range->min_hfreq_khz || hsync > range->max_hfreq_khz) | ||
| 866 | return false; | ||
| 867 | |||
| 868 | if (vrefresh < range->min_vfreq || vrefresh > range->max_vfreq) | ||
| 869 | return false; | ||
| 870 | |||
| 871 | if (range->pixel_clock_mhz && range->pixel_clock_mhz != 0xff) { | ||
| 872 | /* be forgiving since it's in units of 10MHz */ | ||
| 873 | int max_clock = range->pixel_clock_mhz * 10 + 9; | ||
| 874 | max_clock *= 1000; | ||
| 875 | if (mode->clock > max_clock) | ||
| 876 | return false; | ||
| 877 | } | ||
| 878 | |||
| 879 | return true; | ||
| 880 | } | ||
| 881 | |||
| 882 | /* | ||
| 883 | * XXX If drm_dmt_modes ever regrows the CVT-R modes (and it will) this will | ||
| 884 | * need to account for them. | ||
| 885 | */ | ||
| 886 | static int drm_gtf_modes_for_range(struct drm_connector *connector, | ||
| 887 | struct detailed_timing *timing) | ||
| 888 | { | ||
| 889 | int i, modes = 0; | ||
| 890 | struct drm_display_mode *newmode; | ||
| 891 | struct drm_device *dev = connector->dev; | ||
| 892 | |||
| 893 | for (i = 0; i < drm_num_dmt_modes; i++) { | ||
| 894 | if (mode_in_range(drm_dmt_modes + i, timing)) { | ||
| 895 | newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]); | ||
| 896 | if (newmode) { | ||
| 897 | drm_mode_probed_add(connector, newmode); | ||
| 898 | modes++; | ||
| 899 | } | ||
| 900 | } | ||
| 901 | } | ||
| 902 | |||
| 903 | return modes; | ||
| 904 | } | ||
| 905 | |||
| 906 | static int drm_cvt_modes(struct drm_connector *connector, | ||
| 907 | struct detailed_timing *timing) | ||
| 908 | { | ||
| 909 | int i, j, modes = 0; | ||
| 910 | struct drm_display_mode *newmode; | ||
| 911 | struct drm_device *dev = connector->dev; | ||
| 912 | struct cvt_timing *cvt; | ||
| 913 | const int rates[] = { 60, 85, 75, 60, 50 }; | ||
| 914 | |||
| 915 | for (i = 0; i < 4; i++) { | ||
| 916 | int width, height; | ||
| 917 | cvt = &(timing->data.other_data.data.cvt[i]); | ||
| 918 | |||
| 919 | height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 8) + 1) * 2; | ||
| 920 | switch (cvt->code[1] & 0xc0) { | ||
| 921 | case 0x00: | ||
| 922 | width = height * 4 / 3; | ||
| 923 | break; | ||
| 924 | case 0x40: | ||
| 925 | width = height * 16 / 9; | ||
| 926 | break; | ||
| 927 | case 0x80: | ||
| 928 | width = height * 16 / 10; | ||
| 929 | break; | ||
| 930 | case 0xc0: | ||
| 931 | width = height * 15 / 9; | ||
| 932 | break; | ||
| 933 | } | ||
| 934 | |||
| 935 | for (j = 1; j < 5; j++) { | ||
| 936 | if (cvt->code[2] & (1 << j)) { | ||
| 937 | newmode = drm_cvt_mode(dev, width, height, | ||
| 938 | rates[j], j == 0, | ||
| 939 | false, false); | ||
| 940 | if (newmode) { | ||
| 941 | drm_mode_probed_add(connector, newmode); | ||
| 942 | modes++; | ||
| 943 | } | ||
| 944 | } | ||
| 945 | } | ||
| 946 | } | ||
| 947 | |||
| 948 | return modes; | ||
| 949 | } | ||
| 950 | |||
| 951 | static int add_detailed_modes(struct drm_connector *connector, | ||
| 952 | struct detailed_timing *timing, | ||
| 953 | struct edid *edid, u32 quirks, int preferred) | ||
| 954 | { | ||
| 955 | int i, modes = 0; | ||
| 956 | struct detailed_non_pixel *data = &timing->data.other_data; | ||
| 957 | int timing_level = standard_timing_level(edid); | ||
| 958 | int gtf = (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF); | ||
| 959 | struct drm_display_mode *newmode; | ||
| 960 | struct drm_device *dev = connector->dev; | ||
| 961 | |||
| 962 | if (timing->pixel_clock) { | ||
| 963 | newmode = drm_mode_detailed(dev, edid, timing, quirks); | ||
| 964 | if (!newmode) | ||
| 965 | return 0; | ||
| 966 | |||
| 967 | if (preferred) | ||
| 968 | newmode->type |= DRM_MODE_TYPE_PREFERRED; | ||
| 969 | |||
| 970 | drm_mode_probed_add(connector, newmode); | ||
| 971 | return 1; | ||
| 972 | } | ||
| 973 | |||
| 974 | /* other timing types */ | ||
| 975 | switch (data->type) { | ||
| 976 | case EDID_DETAIL_MONITOR_RANGE: | ||
| 977 | if (gtf) | ||
| 978 | modes += drm_gtf_modes_for_range(connector, timing); | ||
| 979 | break; | ||
| 980 | case EDID_DETAIL_STD_MODES: | ||
| 981 | /* Six modes per detailed section */ | ||
| 982 | for (i = 0; i < 6; i++) { | ||
| 983 | struct std_timing *std; | ||
| 984 | struct drm_display_mode *newmode; | ||
| 985 | |||
| 986 | std = &data->data.timings[i]; | ||
| 987 | newmode = drm_mode_std(dev, std, edid->revision, | ||
| 988 | timing_level); | ||
| 989 | if (newmode) { | ||
| 990 | drm_mode_probed_add(connector, newmode); | ||
| 991 | modes++; | ||
| 992 | } | ||
| 993 | } | ||
| 994 | break; | ||
| 995 | case EDID_DETAIL_CVT_3BYTE: | ||
| 996 | modes += drm_cvt_modes(connector, timing); | ||
| 997 | break; | ||
| 998 | default: | ||
| 999 | break; | ||
| 1000 | } | ||
| 1001 | |||
| 1002 | return modes; | ||
| 1003 | } | ||
| 1004 | |||
| 837 | /** | 1005 | /** |
| 838 | * add_detailed_modes - get detailed mode info from EDID data | 1006 | * add_detailed_info - get detailed mode info from EDID data |
| 839 | * @connector: attached connector | 1007 | * @connector: attached connector |
| 840 | * @edid: EDID block to scan | 1008 | * @edid: EDID block to scan |
| 841 | * @quirks: quirks to apply | 1009 | * @quirks: quirks to apply |
| @@ -846,67 +1014,24 @@ static int add_standard_modes(struct drm_connector *connector, struct edid *edid | |||
| 846 | static int add_detailed_info(struct drm_connector *connector, | 1014 | static int add_detailed_info(struct drm_connector *connector, |
| 847 | struct edid *edid, u32 quirks) | 1015 | struct edid *edid, u32 quirks) |
| 848 | { | 1016 | { |
| 849 | struct drm_device *dev = connector->dev; | 1017 | int i, modes = 0; |
| 850 | int i, j, modes = 0; | ||
| 851 | int timing_level; | ||
| 852 | |||
| 853 | timing_level = standard_timing_level(edid); | ||
| 854 | 1018 | ||
| 855 | for (i = 0; i < EDID_DETAILED_TIMINGS; i++) { | 1019 | for (i = 0; i < EDID_DETAILED_TIMINGS; i++) { |
| 856 | struct detailed_timing *timing = &edid->detailed_timings[i]; | 1020 | struct detailed_timing *timing = &edid->detailed_timings[i]; |
| 857 | struct detailed_non_pixel *data = &timing->data.other_data; | 1021 | int preferred = (i == 0) && (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING); |
| 858 | struct drm_display_mode *newmode; | ||
| 859 | |||
| 860 | /* X server check is version 1.1 or higher */ | ||
| 861 | if (edid->version == 1 && edid->revision >= 1 && | ||
| 862 | !timing->pixel_clock) { | ||
| 863 | /* Other timing or info */ | ||
| 864 | switch (data->type) { | ||
| 865 | case EDID_DETAIL_MONITOR_SERIAL: | ||
| 866 | break; | ||
| 867 | case EDID_DETAIL_MONITOR_STRING: | ||
| 868 | break; | ||
| 869 | case EDID_DETAIL_MONITOR_RANGE: | ||
| 870 | /* Get monitor range data */ | ||
| 871 | break; | ||
| 872 | case EDID_DETAIL_MONITOR_NAME: | ||
| 873 | break; | ||
| 874 | case EDID_DETAIL_MONITOR_CPDATA: | ||
| 875 | break; | ||
| 876 | case EDID_DETAIL_STD_MODES: | ||
| 877 | for (j = 0; j < 6; i++) { | ||
| 878 | struct std_timing *std; | ||
| 879 | struct drm_display_mode *newmode; | ||
| 880 | |||
| 881 | std = &data->data.timings[j]; | ||
| 882 | newmode = drm_mode_std(dev, std, | ||
| 883 | edid->revision, | ||
| 884 | timing_level); | ||
| 885 | if (newmode) { | ||
| 886 | drm_mode_probed_add(connector, newmode); | ||
| 887 | modes++; | ||
| 888 | } | ||
| 889 | } | ||
| 890 | break; | ||
| 891 | default: | ||
| 892 | break; | ||
| 893 | } | ||
| 894 | } else { | ||
| 895 | newmode = drm_mode_detailed(dev, edid, timing, quirks); | ||
| 896 | if (!newmode) | ||
| 897 | continue; | ||
| 898 | 1022 | ||
| 899 | /* First detailed mode is preferred */ | 1023 | /* In 1.0, only timings are allowed */ |
| 900 | if (i == 0 && (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING)) | 1024 | if (!timing->pixel_clock && edid->version == 1 && |
| 901 | newmode->type |= DRM_MODE_TYPE_PREFERRED; | 1025 | edid->revision == 0) |
| 902 | drm_mode_probed_add(connector, newmode); | 1026 | continue; |
| 903 | 1027 | ||
| 904 | modes++; | 1028 | modes += add_detailed_modes(connector, timing, edid, quirks, |
| 905 | } | 1029 | preferred); |
| 906 | } | 1030 | } |
| 907 | 1031 | ||
| 908 | return modes; | 1032 | return modes; |
| 909 | } | 1033 | } |
| 1034 | |||
| 910 | /** | 1035 | /** |
| 911 | * add_detailed_mode_eedid - get detailed mode info from additional timing | 1036 |
| 912 | * EDID block | 1037 | * EDID block |
| @@ -920,12 +1045,9 @@ static int add_detailed_info(struct drm_connector *connector, | |||
| 920 | static int add_detailed_info_eedid(struct drm_connector *connector, | 1045 | static int add_detailed_info_eedid(struct drm_connector *connector, |
| 921 | struct edid *edid, u32 quirks) | 1046 | struct edid *edid, u32 quirks) |
| 922 | { | 1047 | { |
| 923 | struct drm_device *dev = connector->dev; | 1048 | int i, modes = 0; |
| 924 | int i, j, modes = 0; | ||
| 925 | char *edid_ext = NULL; | 1049 | char *edid_ext = NULL; |
| 926 | struct detailed_timing *timing; | 1050 | struct detailed_timing *timing; |
| 927 | struct detailed_non_pixel *data; | ||
| 928 | struct drm_display_mode *newmode; | ||
| 929 | int edid_ext_num; | 1051 | int edid_ext_num; |
| 930 | int start_offset, end_offset; | 1052 | int start_offset, end_offset; |
| 931 | int timing_level; | 1053 | int timing_level; |
| @@ -976,51 +1098,7 @@ static int add_detailed_info_eedid(struct drm_connector *connector, | |||
| 976 | for (i = start_offset; i < end_offset; | 1098 | for (i = start_offset; i < end_offset; |
| 977 | i += sizeof(struct detailed_timing)) { | 1099 | i += sizeof(struct detailed_timing)) { |
| 978 | timing = (struct detailed_timing *)(edid_ext + i); | 1100 | timing = (struct detailed_timing *)(edid_ext + i); |
| 979 | data = &timing->data.other_data; | 1101 | modes += add_detailed_modes(connector, timing, edid, quirks, 0); |
| 980 | /* Detailed mode timing */ | ||
| 981 | if (timing->pixel_clock) { | ||
| 982 | newmode = drm_mode_detailed(dev, edid, timing, quirks); | ||
| 983 | if (!newmode) | ||
| 984 | continue; | ||
| 985 | |||
| 986 | drm_mode_probed_add(connector, newmode); | ||
| 987 | |||
| 988 | modes++; | ||
| 989 | continue; | ||
| 990 | } | ||
| 991 | |||
| 992 | /* Other timing or info */ | ||
| 993 | switch (data->type) { | ||
| 994 | case EDID_DETAIL_MONITOR_SERIAL: | ||
| 995 | break; | ||
| 996 | case EDID_DETAIL_MONITOR_STRING: | ||
| 997 | break; | ||
| 998 | case EDID_DETAIL_MONITOR_RANGE: | ||
| 999 | /* Get monitor range data */ | ||
| 1000 | break; | ||
| 1001 | case EDID_DETAIL_MONITOR_NAME: | ||
| 1002 | break; | ||
| 1003 | case EDID_DETAIL_MONITOR_CPDATA: | ||
| 1004 | break; | ||
| 1005 | case EDID_DETAIL_STD_MODES: | ||
| 1006 | /* Five modes per detailed section */ | ||
| 1007 | for (j = 0; j < 5; i++) { | ||
| 1008 | struct std_timing *std; | ||
| 1009 | struct drm_display_mode *newmode; | ||
| 1010 | |||
| 1011 | std = &data->data.timings[j]; | ||
| 1012 | newmode = drm_mode_std(dev, std, | ||
| 1013 | edid->revision, | ||
| 1014 | timing_level); | ||
| 1015 | if (newmode) { | ||
| 1016 | drm_mode_probed_add(connector, newmode); | ||
| 1017 | modes++; | ||
| 1018 | } | ||
| 1019 | } | ||
| 1020 | break; | ||
| 1021 | default: | ||
| 1022 | break; | ||
| 1023 | } | ||
| 1024 | } | 1102 | } |
| 1025 | 1103 | ||
| 1026 | return modes; | 1104 | return modes; |
| @@ -1066,19 +1144,19 @@ static int drm_ddc_read_edid(struct drm_connector *connector, | |||
| 1066 | struct i2c_adapter *adapter, | 1144 | struct i2c_adapter *adapter, |
| 1067 | char *buf, int len) | 1145 | char *buf, int len) |
| 1068 | { | 1146 | { |
| 1069 | int ret; | 1147 | int i; |
| 1070 | 1148 | ||
| 1071 | ret = drm_do_probe_ddc_edid(adapter, buf, len); | 1149 | for (i = 0; i < 4; i++) { |
| 1072 | if (ret != 0) { | 1150 | if (drm_do_probe_ddc_edid(adapter, buf, len)) |
| 1073 | goto end; | 1151 | return -1; |
| 1074 | } | 1152 | if (edid_is_valid((struct edid *)buf)) |
| 1075 | if (!edid_is_valid((struct edid *)buf)) { | 1153 | return 0; |
| 1076 | dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n", | ||
| 1077 | drm_get_connector_name(connector)); | ||
| 1078 | ret = -1; | ||
| 1079 | } | 1154 | } |
| 1080 | end: | 1155 | |
| 1081 | return ret; | 1156 | /* repeated checksum failures; warn, but carry on */ |
| 1157 | dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n", | ||
| 1158 | drm_get_connector_name(connector)); | ||
| 1159 | return -1; | ||
| 1082 | } | 1160 | } |
| 1083 | 1161 | ||
| 1084 | /** | 1162 | /** |
| @@ -1296,6 +1374,8 @@ int drm_add_modes_noedid(struct drm_connector *connector, | |||
| 1296 | ptr->vdisplay > vdisplay) | 1374 | ptr->vdisplay > vdisplay) |
| 1297 | continue; | 1375 | continue; |
| 1298 | } | 1376 | } |
| 1377 | if (drm_mode_vrefresh(ptr) > 61) | ||
| 1378 | continue; | ||
| 1299 | mode = drm_mode_duplicate(dev, ptr); | 1379 | mode = drm_mode_duplicate(dev, ptr); |
| 1300 | if (mode) { | 1380 | if (mode) { |
| 1301 | drm_mode_probed_add(connector, mode); | 1381 | drm_mode_probed_add(connector, mode); |
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 65ef011fa8ba..1b49fa055f4f 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c | |||
| @@ -373,11 +373,9 @@ static void drm_fb_helper_off(struct fb_info *info, int dpms_mode) | |||
| 373 | mutex_unlock(&dev->mode_config.mutex); | 373 | mutex_unlock(&dev->mode_config.mutex); |
| 374 | } | 374 | } |
| 375 | } | 375 | } |
| 376 | if (dpms_mode == DRM_MODE_DPMS_OFF) { | 376 | mutex_lock(&dev->mode_config.mutex); |
| 377 | mutex_lock(&dev->mode_config.mutex); | 377 | crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); |
| 378 | crtc_funcs->dpms(crtc, dpms_mode); | 378 | mutex_unlock(&dev->mode_config.mutex); |
| 379 | mutex_unlock(&dev->mode_config.mutex); | ||
| 380 | } | ||
| 381 | } | 379 | } |
| 382 | } | 380 | } |
| 383 | } | 381 | } |
| @@ -385,18 +383,23 @@ static void drm_fb_helper_off(struct fb_info *info, int dpms_mode) | |||
| 385 | int drm_fb_helper_blank(int blank, struct fb_info *info) | 383 | int drm_fb_helper_blank(int blank, struct fb_info *info) |
| 386 | { | 384 | { |
| 387 | switch (blank) { | 385 | switch (blank) { |
| 386 | /* Display: On; HSync: On, VSync: On */ | ||
| 388 | case FB_BLANK_UNBLANK: | 387 | case FB_BLANK_UNBLANK: |
| 389 | drm_fb_helper_on(info); | 388 | drm_fb_helper_on(info); |
| 390 | break; | 389 | break; |
| 390 | /* Display: Off; HSync: On, VSync: On */ | ||
| 391 | case FB_BLANK_NORMAL: | 391 | case FB_BLANK_NORMAL: |
| 392 | drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY); | 392 | drm_fb_helper_off(info, DRM_MODE_DPMS_ON); |
| 393 | break; | 393 | break; |
| 394 | /* Display: Off; HSync: Off, VSync: On */ | ||
| 394 | case FB_BLANK_HSYNC_SUSPEND: | 395 | case FB_BLANK_HSYNC_SUSPEND: |
| 395 | drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY); | 396 | drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY); |
| 396 | break; | 397 | break; |
| 398 | /* Display: Off; HSync: On, VSync: Off */ | ||
| 397 | case FB_BLANK_VSYNC_SUSPEND: | 399 | case FB_BLANK_VSYNC_SUSPEND: |
| 398 | drm_fb_helper_off(info, DRM_MODE_DPMS_SUSPEND); | 400 | drm_fb_helper_off(info, DRM_MODE_DPMS_SUSPEND); |
| 399 | break; | 401 | break; |
| 402 | /* Display: Off; HSync: Off, VSync: Off */ | ||
| 400 | case FB_BLANK_POWERDOWN: | 403 | case FB_BLANK_POWERDOWN: |
| 401 | drm_fb_helper_off(info, DRM_MODE_DPMS_OFF); | 404 | drm_fb_helper_off(info, DRM_MODE_DPMS_OFF); |
| 402 | break; | 405 | break; |
| @@ -905,8 +908,13 @@ int drm_fb_helper_single_fb_probe(struct drm_device *dev, | |||
| 905 | 908 | ||
| 906 | if (new_fb) { | 909 | if (new_fb) { |
| 907 | info->var.pixclock = 0; | 910 | info->var.pixclock = 0; |
| 908 | if (register_framebuffer(info) < 0) | 911 | ret = fb_alloc_cmap(&info->cmap, modeset->crtc->gamma_size, 0); |
| 912 | if (ret) | ||
| 913 | return ret; | ||
| 914 | if (register_framebuffer(info) < 0) { | ||
| 915 | fb_dealloc_cmap(&info->cmap); | ||
| 909 | return -EINVAL; | 916 | return -EINVAL; |
| 917 | } | ||
| 910 | } else { | 918 | } else { |
| 911 | drm_fb_helper_set_par(info); | 919 | drm_fb_helper_set_par(info); |
| 912 | } | 920 | } |
| @@ -936,6 +944,7 @@ void drm_fb_helper_free(struct drm_fb_helper *helper) | |||
| 936 | unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op); | 944 | unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op); |
| 937 | } | 945 | } |
| 938 | drm_fb_helper_crtc_free(helper); | 946 | drm_fb_helper_crtc_free(helper); |
| 947 | fb_dealloc_cmap(&helper->fb->fbdev->cmap); | ||
| 939 | } | 948 | } |
| 940 | EXPORT_SYMBOL(drm_fb_helper_free); | 949 | EXPORT_SYMBOL(drm_fb_helper_free); |
| 941 | 950 | ||
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index 251bc0e3b5ec..08d14df3bb42 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c | |||
| @@ -257,6 +257,9 @@ static int drm_open_helper(struct inode *inode, struct file *filp, | |||
| 257 | 257 | ||
| 258 | INIT_LIST_HEAD(&priv->lhead); | 258 | INIT_LIST_HEAD(&priv->lhead); |
| 259 | INIT_LIST_HEAD(&priv->fbs); | 259 | INIT_LIST_HEAD(&priv->fbs); |
| 260 | INIT_LIST_HEAD(&priv->event_list); | ||
| 261 | init_waitqueue_head(&priv->event_wait); | ||
| 262 | priv->event_space = 4096; /* set aside 4k for event buffer */ | ||
| 260 | 263 | ||
| 261 | if (dev->driver->driver_features & DRIVER_GEM) | 264 | if (dev->driver->driver_features & DRIVER_GEM) |
| 262 | drm_gem_open(dev, priv); | 265 | drm_gem_open(dev, priv); |
| @@ -297,6 +300,18 @@ static int drm_open_helper(struct inode *inode, struct file *filp, | |||
| 297 | goto out_free; | 300 | goto out_free; |
| 298 | } | 301 | } |
| 299 | } | 302 | } |
| 303 | mutex_lock(&dev->struct_mutex); | ||
| 304 | if (dev->driver->master_set) { | ||
| 305 | ret = dev->driver->master_set(dev, priv, true); | ||
| 306 | if (ret) { | ||
| 307 | /* drop both references if this fails */ | ||
| 308 | drm_master_put(&priv->minor->master); | ||
| 309 | drm_master_put(&priv->master); | ||
| 310 | mutex_unlock(&dev->struct_mutex); | ||
| 311 | goto out_free; | ||
| 312 | } | ||
| 313 | } | ||
| 314 | mutex_unlock(&dev->struct_mutex); | ||
| 300 | } else { | 315 | } else { |
| 301 | /* get a reference to the master */ | 316 | /* get a reference to the master */ |
| 302 | priv->master = drm_master_get(priv->minor->master); | 317 | priv->master = drm_master_get(priv->minor->master); |
| @@ -413,6 +428,30 @@ static void drm_master_release(struct drm_device *dev, struct file *filp) | |||
| 413 | } | 428 | } |
| 414 | } | 429 | } |
| 415 | 430 | ||
| 431 | static void drm_events_release(struct drm_file *file_priv) | ||
| 432 | { | ||
| 433 | struct drm_device *dev = file_priv->minor->dev; | ||
| 434 | struct drm_pending_event *e, *et; | ||
| 435 | struct drm_pending_vblank_event *v, *vt; | ||
| 436 | unsigned long flags; | ||
| 437 | |||
| 438 | spin_lock_irqsave(&dev->event_lock, flags); | ||
| 439 | |||
| 440 | /* Remove pending flips */ | ||
| 441 | list_for_each_entry_safe(v, vt, &dev->vblank_event_list, base.link) | ||
| 442 | if (v->base.file_priv == file_priv) { | ||
| 443 | list_del(&v->base.link); | ||
| 444 | drm_vblank_put(dev, v->pipe); | ||
| 445 | v->base.destroy(&v->base); | ||
| 446 | } | ||
| 447 | |||
| 448 | /* Remove unconsumed events */ | ||
| 449 | list_for_each_entry_safe(e, et, &file_priv->event_list, link) | ||
| 450 | e->destroy(e); | ||
| 451 | |||
| 452 | spin_unlock_irqrestore(&dev->event_lock, flags); | ||
| 453 | } | ||
| 454 | |||
| 416 | /** | 455 | /** |
| 417 | * Release file. | 456 | * Release file. |
| 418 | * | 457 | * |
| @@ -451,6 +490,8 @@ int drm_release(struct inode *inode, struct file *filp) | |||
| 451 | if (file_priv->minor->master) | 490 | if (file_priv->minor->master) |
| 452 | drm_master_release(dev, filp); | 491 | drm_master_release(dev, filp); |
| 453 | 492 | ||
| 493 | drm_events_release(file_priv); | ||
| 494 | |||
| 454 | if (dev->driver->driver_features & DRIVER_GEM) | 495 | if (dev->driver->driver_features & DRIVER_GEM) |
| 455 | drm_gem_release(dev, file_priv); | 496 | drm_gem_release(dev, file_priv); |
| 456 | 497 | ||
| @@ -504,6 +545,8 @@ int drm_release(struct inode *inode, struct file *filp) | |||
| 504 | 545 | ||
| 505 | if (file_priv->minor->master == file_priv->master) { | 546 | if (file_priv->minor->master == file_priv->master) { |
| 506 | /* drop the reference held my the minor */ | 547 | /* drop the reference held my the minor */ |
| 548 | if (dev->driver->master_drop) | ||
| 549 | dev->driver->master_drop(dev, file_priv, true); | ||
| 507 | drm_master_put(&file_priv->minor->master); | 550 | drm_master_put(&file_priv->minor->master); |
| 508 | } | 551 | } |
| 509 | } | 552 | } |
| @@ -544,9 +587,74 @@ int drm_release(struct inode *inode, struct file *filp) | |||
| 544 | } | 587 | } |
| 545 | EXPORT_SYMBOL(drm_release); | 588 | EXPORT_SYMBOL(drm_release); |
| 546 | 589 | ||
| 547 | /** No-op. */ | 590 | static bool |
| 591 | drm_dequeue_event(struct drm_file *file_priv, | ||
| 592 | size_t total, size_t max, struct drm_pending_event **out) | ||
| 593 | { | ||
| 594 | struct drm_device *dev = file_priv->minor->dev; | ||
| 595 | struct drm_pending_event *e; | ||
| 596 | unsigned long flags; | ||
| 597 | bool ret = false; | ||
| 598 | |||
| 599 | spin_lock_irqsave(&dev->event_lock, flags); | ||
| 600 | |||
| 601 | *out = NULL; | ||
| 602 | if (list_empty(&file_priv->event_list)) | ||
| 603 | goto out; | ||
| 604 | e = list_first_entry(&file_priv->event_list, | ||
| 605 | struct drm_pending_event, link); | ||
| 606 | if (e->event->length + total > max) | ||
| 607 | goto out; | ||
| 608 | |||
| 609 | file_priv->event_space += e->event->length; | ||
| 610 | list_del(&e->link); | ||
| 611 | *out = e; | ||
| 612 | ret = true; | ||
| 613 | |||
| 614 | out: | ||
| 615 | spin_unlock_irqrestore(&dev->event_lock, flags); | ||
| 616 | return ret; | ||
| 617 | } | ||
| 618 | |||
| 619 | ssize_t drm_read(struct file *filp, char __user *buffer, | ||
| 620 | size_t count, loff_t *offset) | ||
| 621 | { | ||
| 622 | struct drm_file *file_priv = filp->private_data; | ||
| 623 | struct drm_pending_event *e; | ||
| 624 | size_t total; | ||
| 625 | ssize_t ret; | ||
| 626 | |||
| 627 | ret = wait_event_interruptible(file_priv->event_wait, | ||
| 628 | !list_empty(&file_priv->event_list)); | ||
| 629 | if (ret < 0) | ||
| 630 | return ret; | ||
| 631 | |||
| 632 | total = 0; | ||
| 633 | while (drm_dequeue_event(file_priv, total, count, &e)) { | ||
| 634 | if (copy_to_user(buffer + total, | ||
| 635 | e->event, e->event->length)) { | ||
| 636 | total = -EFAULT; | ||
| 637 | break; | ||
| 638 | } | ||
| 639 | |||
| 640 | total += e->event->length; | ||
| 641 | e->destroy(e); | ||
| 642 | } | ||
| 643 | |||
| 644 | return total; | ||
| 645 | } | ||
| 646 | EXPORT_SYMBOL(drm_read); | ||
| 647 | |||
| 548 | unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait) | 648 | unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait) |
| 549 | { | 649 | { |
| 550 | return 0; | 650 | struct drm_file *file_priv = filp->private_data; |
| 651 | unsigned int mask = 0; | ||
| 652 | |||
| 653 | poll_wait(filp, &file_priv->event_wait, wait); | ||
| 654 | |||
| 655 | if (!list_empty(&file_priv->event_list)) | ||
| 656 | mask |= POLLIN | POLLRDNORM; | ||
| 657 | |||
| 658 | return mask; | ||
| 551 | } | 659 | } |
| 552 | EXPORT_SYMBOL(drm_poll); | 660 | EXPORT_SYMBOL(drm_poll); |
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index 0a6f0b3bdc78..6b3ce6d38848 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c | |||
| @@ -550,6 +550,63 @@ out: | |||
| 550 | return ret; | 550 | return ret; |
| 551 | } | 551 | } |
| 552 | 552 | ||
| 553 | static int drm_queue_vblank_event(struct drm_device *dev, int pipe, | ||
| 554 | union drm_wait_vblank *vblwait, | ||
| 555 | struct drm_file *file_priv) | ||
| 556 | { | ||
| 557 | struct drm_pending_vblank_event *e; | ||
| 558 | struct timeval now; | ||
| 559 | unsigned long flags; | ||
| 560 | unsigned int seq; | ||
| 561 | |||
| 562 | e = kzalloc(sizeof *e, GFP_KERNEL); | ||
| 563 | if (e == NULL) | ||
| 564 | return -ENOMEM; | ||
| 565 | |||
| 566 | e->pipe = pipe; | ||
| 567 | e->event.base.type = DRM_EVENT_VBLANK; | ||
| 568 | e->event.base.length = sizeof e->event; | ||
| 569 | e->event.user_data = vblwait->request.signal; | ||
| 570 | e->base.event = &e->event.base; | ||
| 571 | e->base.file_priv = file_priv; | ||
| 572 | e->base.destroy = (void (*) (struct drm_pending_event *)) kfree; | ||
| 573 | |||
| 574 | do_gettimeofday(&now); | ||
| 575 | spin_lock_irqsave(&dev->event_lock, flags); | ||
| 576 | |||
| 577 | if (file_priv->event_space < sizeof e->event) { | ||
| 578 | spin_unlock_irqrestore(&dev->event_lock, flags); | ||
| 579 | kfree(e); | ||
| 580 | return -ENOMEM; | ||
| 581 | } | ||
| 582 | |||
| 583 | file_priv->event_space -= sizeof e->event; | ||
| 584 | seq = drm_vblank_count(dev, pipe); | ||
| 585 | if ((vblwait->request.type & _DRM_VBLANK_NEXTONMISS) && | ||
| 586 | (seq - vblwait->request.sequence) <= (1 << 23)) { | ||
| 587 | vblwait->request.sequence = seq + 1; | ||
| 588 | vblwait->reply.sequence = vblwait->request.sequence; | ||
| 589 | } | ||
| 590 | |||
| 591 | DRM_DEBUG("event on vblank count %d, current %d, crtc %d\n", | ||
| 592 | vblwait->request.sequence, seq, pipe); | ||
| 593 | |||
| 594 | e->event.sequence = vblwait->request.sequence; | ||
| 595 | if ((seq - vblwait->request.sequence) <= (1 << 23)) { | ||
| 596 | e->event.tv_sec = now.tv_sec; | ||
| 597 | e->event.tv_usec = now.tv_usec; | ||
| 598 | drm_vblank_put(dev, e->pipe); | ||
| 599 | list_add_tail(&e->base.link, &e->base.file_priv->event_list); | ||
| 600 | wake_up_interruptible(&e->base.file_priv->event_wait); | ||
| 601 | } else { | ||
| 602 | list_add_tail(&e->base.link, &dev->vblank_event_list); | ||
| 603 | } | ||
| 604 | |||
| 605 | spin_unlock_irqrestore(&dev->event_lock, flags); | ||
| 606 | |||
| 607 | return 0; | ||
| 608 | } | ||
| 609 | |||
| 553 | /** | 610 | /** |
| 554 | * Wait for VBLANK. | 611 | * Wait for VBLANK. |
| 555 | * | 612 | * |
| @@ -609,6 +666,9 @@ int drm_wait_vblank(struct drm_device *dev, void *data, | |||
| 609 | goto done; | 666 | goto done; |
| 610 | } | 667 | } |
| 611 | 668 | ||
| 669 | if (flags & _DRM_VBLANK_EVENT) | ||
| 670 | return drm_queue_vblank_event(dev, crtc, vblwait, file_priv); | ||
| 671 | |||
| 612 | if ((flags & _DRM_VBLANK_NEXTONMISS) && | 672 | if ((flags & _DRM_VBLANK_NEXTONMISS) && |
| 613 | (seq - vblwait->request.sequence) <= (1<<23)) { | 673 | (seq - vblwait->request.sequence) <= (1<<23)) { |
| 614 | vblwait->request.sequence = seq + 1; | 674 | vblwait->request.sequence = seq + 1; |
| @@ -641,6 +701,38 @@ done: | |||
| 641 | return ret; | 701 | return ret; |
| 642 | } | 702 | } |
| 643 | 703 | ||
| 704 | void drm_handle_vblank_events(struct drm_device *dev, int crtc) | ||
| 705 | { | ||
| 706 | struct drm_pending_vblank_event *e, *t; | ||
| 707 | struct timeval now; | ||
| 708 | unsigned long flags; | ||
| 709 | unsigned int seq; | ||
| 710 | |||
| 711 | do_gettimeofday(&now); | ||
| 712 | seq = drm_vblank_count(dev, crtc); | ||
| 713 | |||
| 714 | spin_lock_irqsave(&dev->event_lock, flags); | ||
| 715 | |||
| 716 | list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) { | ||
| 717 | if (e->pipe != crtc) | ||
| 718 | continue; | ||
| 719 | if ((seq - e->event.sequence) > (1<<23)) | ||
| 720 | continue; | ||
| 721 | |||
| 722 | DRM_DEBUG("vblank event on %d, current %d\n", | ||
| 723 | e->event.sequence, seq); | ||
| 724 | |||
| 725 | e->event.sequence = seq; | ||
| 726 | e->event.tv_sec = now.tv_sec; | ||
| 727 | e->event.tv_usec = now.tv_usec; | ||
| 728 | drm_vblank_put(dev, e->pipe); | ||
| 729 | list_move_tail(&e->base.link, &e->base.file_priv->event_list); | ||
| 730 | wake_up_interruptible(&e->base.file_priv->event_wait); | ||
| 731 | } | ||
| 732 | |||
| 733 | spin_unlock_irqrestore(&dev->event_lock, flags); | ||
| 734 | } | ||
| 735 | |||
| 644 | /** | 736 | /** |
| 645 | * drm_handle_vblank - handle a vblank event | 737 | * drm_handle_vblank - handle a vblank event |
| 646 | * @dev: DRM device | 738 | * @dev: DRM device |
| @@ -651,7 +743,11 @@ done: | |||
| 651 | */ | 743 | */ |
| 652 | void drm_handle_vblank(struct drm_device *dev, int crtc) | 744 | void drm_handle_vblank(struct drm_device *dev, int crtc) |
| 653 | { | 745 | { |
| 746 | if (!dev->num_crtcs) | ||
| 747 | return; | ||
| 748 | |||
| 654 | atomic_inc(&dev->_vblank_count[crtc]); | 749 | atomic_inc(&dev->_vblank_count[crtc]); |
| 655 | DRM_WAKEUP(&dev->vbl_queue[crtc]); | 750 | DRM_WAKEUP(&dev->vbl_queue[crtc]); |
| 751 | drm_handle_vblank_events(dev, crtc); | ||
| 656 | } | 752 | } |
| 657 | EXPORT_SYMBOL(drm_handle_vblank); | 753 | EXPORT_SYMBOL(drm_handle_vblank); |
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index 97dc5a4f0de4..1f0d717dbad6 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c | |||
| @@ -395,7 +395,7 @@ int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm) | |||
| 395 | else | 395 | else |
| 396 | total_used += entry->size; | 396 | total_used += entry->size; |
| 397 | } | 397 | } |
| 398 | seq_printf(m, "total: %d, used %d free %d\n", total, total_free, total_used); | 398 | seq_printf(m, "total: %d, used %d free %d\n", total, total_used, total_free); |
| 399 | return 0; | 399 | return 0; |
| 400 | } | 400 | } |
| 401 | EXPORT_SYMBOL(drm_mm_dump_table); | 401 | EXPORT_SYMBOL(drm_mm_dump_table); |
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index 51f677215f1d..6d81a02463a3 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c | |||
| @@ -553,6 +553,32 @@ int drm_mode_height(struct drm_display_mode *mode) | |||
| 553 | } | 553 | } |
| 554 | EXPORT_SYMBOL(drm_mode_height); | 554 | EXPORT_SYMBOL(drm_mode_height); |
| 555 | 555 | ||
| 556 | /** drm_mode_hsync - get the hsync of a mode | ||
| 557 | * @mode: mode | ||
| 558 | * | ||
| 559 | * LOCKING: | ||
| 560 | * None. | ||
| 561 | * | ||
| 562 | * Return @modes's hsync rate in kHz, rounded to the nearest int. | ||
| 563 | */ | ||
| 564 | int drm_mode_hsync(struct drm_display_mode *mode) | ||
| 565 | { | ||
| 566 | unsigned int calc_val; | ||
| 567 | |||
| 568 | if (mode->hsync) | ||
| 569 | return mode->hsync; | ||
| 570 | |||
| 571 | if (mode->htotal < 0) | ||
| 572 | return 0; | ||
| 573 | |||
| 574 | calc_val = (mode->clock * 1000) / mode->htotal; /* hsync in Hz */ | ||
| 575 | calc_val += 500; /* round to 1000Hz */ | ||
| 576 | calc_val /= 1000; /* truncate to kHz */ | ||
| 577 | |||
| 578 | return calc_val; | ||
| 579 | } | ||
| 580 | EXPORT_SYMBOL(drm_mode_hsync); | ||
| 581 | |||
| 556 | /** | 582 | /** |
| 557 | * drm_mode_vrefresh - get the vrefresh of a mode | 583 | * drm_mode_vrefresh - get the vrefresh of a mode |
| 558 | * @mode: mode | 584 | * @mode: mode |
| @@ -560,7 +586,7 @@ EXPORT_SYMBOL(drm_mode_height); | |||
| 560 | * LOCKING: | 586 | * LOCKING: |
| 561 | * None. | 587 | * None. |
| 562 | * | 588 | * |
| 563 | * Return @mode's vrefresh rate or calculate it if necessary. | 589 | * Return @mode's vrefresh rate in Hz or calculate it if necessary. |
| 564 | * | 590 | * |
| 565 | * FIXME: why is this needed? shouldn't vrefresh be set already? | 591 | * FIXME: why is this needed? shouldn't vrefresh be set already? |
| 566 | * | 592 | * |
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c index 55bb8a82d612..ad73e141afdb 100644 --- a/drivers/gpu/drm/drm_stub.c +++ b/drivers/gpu/drm/drm_stub.c | |||
| @@ -128,6 +128,7 @@ struct drm_master *drm_master_get(struct drm_master *master) | |||
| 128 | kref_get(&master->refcount); | 128 | kref_get(&master->refcount); |
| 129 | return master; | 129 | return master; |
| 130 | } | 130 | } |
| 131 | EXPORT_SYMBOL(drm_master_get); | ||
| 131 | 132 | ||
| 132 | static void drm_master_destroy(struct kref *kref) | 133 | static void drm_master_destroy(struct kref *kref) |
| 133 | { | 134 | { |
| @@ -170,10 +171,13 @@ void drm_master_put(struct drm_master **master) | |||
| 170 | kref_put(&(*master)->refcount, drm_master_destroy); | 171 | kref_put(&(*master)->refcount, drm_master_destroy); |
| 171 | *master = NULL; | 172 | *master = NULL; |
| 172 | } | 173 | } |
| 174 | EXPORT_SYMBOL(drm_master_put); | ||
| 173 | 175 | ||
| 174 | int drm_setmaster_ioctl(struct drm_device *dev, void *data, | 176 | int drm_setmaster_ioctl(struct drm_device *dev, void *data, |
| 175 | struct drm_file *file_priv) | 177 | struct drm_file *file_priv) |
| 176 | { | 178 | { |
| 179 | int ret = 0; | ||
| 180 | |||
| 177 | if (file_priv->is_master) | 181 | if (file_priv->is_master) |
| 178 | return 0; | 182 | return 0; |
| 179 | 183 | ||
| @@ -188,6 +192,13 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data, | |||
| 188 | mutex_lock(&dev->struct_mutex); | 192 | mutex_lock(&dev->struct_mutex); |
| 189 | file_priv->minor->master = drm_master_get(file_priv->master); | 193 | file_priv->minor->master = drm_master_get(file_priv->master); |
| 190 | file_priv->is_master = 1; | 194 | file_priv->is_master = 1; |
| 195 | if (dev->driver->master_set) { | ||
| 196 | ret = dev->driver->master_set(dev, file_priv, false); | ||
| 197 | if (unlikely(ret != 0)) { | ||
| 198 | file_priv->is_master = 0; | ||
| 199 | drm_master_put(&file_priv->minor->master); | ||
| 200 | } | ||
| 201 | } | ||
| 191 | mutex_unlock(&dev->struct_mutex); | 202 | mutex_unlock(&dev->struct_mutex); |
| 192 | } | 203 | } |
| 193 | 204 | ||
| @@ -204,6 +215,8 @@ int drm_dropmaster_ioctl(struct drm_device *dev, void *data, | |||
| 204 | return -EINVAL; | 215 | return -EINVAL; |
| 205 | 216 | ||
| 206 | mutex_lock(&dev->struct_mutex); | 217 | mutex_lock(&dev->struct_mutex); |
| 218 | if (dev->driver->master_drop) | ||
| 219 | dev->driver->master_drop(dev, file_priv, false); | ||
| 207 | drm_master_put(&file_priv->minor->master); | 220 | drm_master_put(&file_priv->minor->master); |
| 208 | file_priv->is_master = 0; | 221 | file_priv->is_master = 0; |
| 209 | mutex_unlock(&dev->struct_mutex); | 222 | mutex_unlock(&dev->struct_mutex); |
| @@ -220,9 +233,11 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev, | |||
| 220 | INIT_LIST_HEAD(&dev->ctxlist); | 233 | INIT_LIST_HEAD(&dev->ctxlist); |
| 221 | INIT_LIST_HEAD(&dev->vmalist); | 234 | INIT_LIST_HEAD(&dev->vmalist); |
| 222 | INIT_LIST_HEAD(&dev->maplist); | 235 | INIT_LIST_HEAD(&dev->maplist); |
| 236 | INIT_LIST_HEAD(&dev->vblank_event_list); | ||
| 223 | 237 | ||
| 224 | spin_lock_init(&dev->count_lock); | 238 | spin_lock_init(&dev->count_lock); |
| 225 | spin_lock_init(&dev->drw_lock); | 239 | spin_lock_init(&dev->drw_lock); |
| 240 | spin_lock_init(&dev->event_lock); | ||
| 226 | init_timer(&dev->timer); | 241 | init_timer(&dev->timer); |
| 227 | mutex_init(&dev->struct_mutex); | 242 | mutex_init(&dev->struct_mutex); |
| 228 | mutex_init(&dev->ctxlist_mutex); | 243 | mutex_init(&dev->ctxlist_mutex); |
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index fa7b9be096bc..e3d049229cdd 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile | |||
| @@ -15,7 +15,6 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \ | |||
| 15 | intel_lvds.o \ | 15 | intel_lvds.o \ |
| 16 | intel_bios.o \ | 16 | intel_bios.o \ |
| 17 | intel_dp.o \ | 17 | intel_dp.o \ |
| 18 | intel_dp_i2c.o \ | ||
| 19 | intel_hdmi.o \ | 18 | intel_hdmi.o \ |
| 20 | intel_sdvo.o \ | 19 | intel_sdvo.o \ |
| 21 | intel_modes.o \ | 20 | intel_modes.o \ |
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 7f436ec075f6..2fa217862058 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
| @@ -333,6 +333,7 @@ static struct drm_driver driver = { | |||
| 333 | .mmap = drm_gem_mmap, | 333 | .mmap = drm_gem_mmap, |
| 334 | .poll = drm_poll, | 334 | .poll = drm_poll, |
| 335 | .fasync = drm_fasync, | 335 | .fasync = drm_fasync, |
| 336 | .read = drm_read, | ||
| 336 | #ifdef CONFIG_COMPAT | 337 | #ifdef CONFIG_COMPAT |
| 337 | .compat_ioctl = i915_compat_ioctl, | 338 | .compat_ioctl = i915_compat_ioctl, |
| 338 | #endif | 339 | #endif |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 099f420de57a..897230832c8c 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
| @@ -32,7 +32,7 @@ | |||
| 32 | #include "intel_drv.h" | 32 | #include "intel_drv.h" |
| 33 | #include "i915_drm.h" | 33 | #include "i915_drm.h" |
| 34 | #include "i915_drv.h" | 34 | #include "i915_drv.h" |
| 35 | #include "intel_dp.h" | 35 | #include "drm_dp_helper.h" |
| 36 | 36 | ||
| 37 | #include "drm_crtc_helper.h" | 37 | #include "drm_crtc_helper.h" |
| 38 | 38 | ||
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index d83447557f9b..63424d5db9c6 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
| @@ -33,7 +33,7 @@ | |||
| 33 | #include "intel_drv.h" | 33 | #include "intel_drv.h" |
| 34 | #include "i915_drm.h" | 34 | #include "i915_drm.h" |
| 35 | #include "i915_drv.h" | 35 | #include "i915_drv.h" |
| 36 | #include "intel_dp.h" | 36 | #include "drm_dp_helper.h" |
| 37 | 37 | ||
| 38 | #define DP_LINK_STATUS_SIZE 6 | 38 | #define DP_LINK_STATUS_SIZE 6 |
| 39 | #define DP_LINK_CHECK_TIMEOUT (10 * 1000) | 39 | #define DP_LINK_CHECK_TIMEOUT (10 * 1000) |
| @@ -382,17 +382,77 @@ intel_dp_aux_native_read(struct intel_output *intel_output, | |||
| 382 | } | 382 | } |
| 383 | 383 | ||
| 384 | static int | 384 | static int |
| 385 | intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, | 385 | intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, |
| 386 | uint8_t *send, int send_bytes, | 386 | uint8_t write_byte, uint8_t *read_byte) |
| 387 | uint8_t *recv, int recv_bytes) | ||
| 388 | { | 387 | { |
| 388 | struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; | ||
| 389 | struct intel_dp_priv *dp_priv = container_of(adapter, | 389 | struct intel_dp_priv *dp_priv = container_of(adapter, |
| 390 | struct intel_dp_priv, | 390 | struct intel_dp_priv, |
| 391 | adapter); | 391 | adapter); |
| 392 | struct intel_output *intel_output = dp_priv->intel_output; | 392 | struct intel_output *intel_output = dp_priv->intel_output; |
| 393 | uint16_t address = algo_data->address; | ||
| 394 | uint8_t msg[5]; | ||
| 395 | uint8_t reply[2]; | ||
| 396 | int msg_bytes; | ||
| 397 | int reply_bytes; | ||
| 398 | int ret; | ||
| 399 | |||
| 400 | /* Set up the command byte */ | ||
| 401 | if (mode & MODE_I2C_READ) | ||
| 402 | msg[0] = AUX_I2C_READ << 4; | ||
| 403 | else | ||
| 404 | msg[0] = AUX_I2C_WRITE << 4; | ||
| 405 | |||
| 406 | if (!(mode & MODE_I2C_STOP)) | ||
| 407 | msg[0] |= AUX_I2C_MOT << 4; | ||
| 408 | |||
| 409 | msg[1] = address >> 8; | ||
| 410 | msg[2] = address; | ||
| 411 | |||
| 412 | switch (mode) { | ||
| 413 | case MODE_I2C_WRITE: | ||
| 414 | msg[3] = 0; | ||
| 415 | msg[4] = write_byte; | ||
| 416 | msg_bytes = 5; | ||
| 417 | reply_bytes = 1; | ||
| 418 | break; | ||
| 419 | case MODE_I2C_READ: | ||
| 420 | msg[3] = 0; | ||
| 421 | msg_bytes = 4; | ||
| 422 | reply_bytes = 2; | ||
| 423 | break; | ||
| 424 | default: | ||
| 425 | msg_bytes = 3; | ||
| 426 | reply_bytes = 1; | ||
| 427 | break; | ||
| 428 | } | ||
| 393 | 429 | ||
| 394 | return intel_dp_aux_ch(intel_output, | 430 | for (;;) { |
| 395 | send, send_bytes, recv, recv_bytes); | 431 | ret = intel_dp_aux_ch(intel_output, |
| 432 | msg, msg_bytes, | ||
| 433 | reply, reply_bytes); | ||
| 434 | if (ret < 0) { | ||
| 435 | DRM_DEBUG("aux_ch failed %d\n", ret); | ||
| 436 | return ret; | ||
| 437 | } | ||
| 438 | switch (reply[0] & AUX_I2C_REPLY_MASK) { | ||
| 439 | case AUX_I2C_REPLY_ACK: | ||
| 440 | if (mode == MODE_I2C_READ) { | ||
| 441 | *read_byte = reply[1]; | ||
| 442 | } | ||
| 443 | return reply_bytes - 1; | ||
| 444 | case AUX_I2C_REPLY_NACK: | ||
| 445 | DRM_DEBUG("aux_ch nack\n"); | ||
| 446 | return -EREMOTEIO; | ||
| 447 | case AUX_I2C_REPLY_DEFER: | ||
| 448 | DRM_DEBUG("aux_ch defer\n"); | ||
| 449 | udelay(100); | ||
| 450 | break; | ||
| 451 | default: | ||
| 452 | DRM_ERROR("aux_ch invalid reply 0x%02x\n", reply[0]); | ||
| 453 | return -EREMOTEIO; | ||
| 454 | } | ||
| 455 | } | ||
| 396 | } | 456 | } |
| 397 | 457 | ||
| 398 | static int | 458 | static int |
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile index b0a9de7a57c2..1e138f5bae09 100644 --- a/drivers/gpu/drm/ttm/Makefile +++ b/drivers/gpu/drm/ttm/Makefile | |||
| @@ -3,6 +3,7 @@ | |||
| 3 | 3 | ||
| 4 | ccflags-y := -Iinclude/drm | 4 | ccflags-y := -Iinclude/drm |
| 5 | ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \ | 5 | ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \ |
| 6 | ttm_bo_util.o ttm_bo_vm.o ttm_module.o ttm_global.o | 6 | ttm_bo_util.o ttm_bo_vm.o ttm_module.o ttm_global.o \ |
| 7 | ttm_object.o ttm_lock.o ttm_execbuf_util.o | ||
| 7 | 8 | ||
| 8 | obj-$(CONFIG_DRM_TTM) += ttm.o | 9 | obj-$(CONFIG_DRM_TTM) += ttm.o |
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 87c06252d464..e13fd23f3334 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c | |||
| @@ -275,9 +275,10 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc) | |||
| 275 | bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT, | 275 | bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT, |
| 276 | page_flags | TTM_PAGE_FLAG_USER, | 276 | page_flags | TTM_PAGE_FLAG_USER, |
| 277 | glob->dummy_read_page); | 277 | glob->dummy_read_page); |
| 278 | if (unlikely(bo->ttm == NULL)) | 278 | if (unlikely(bo->ttm == NULL)) { |
| 279 | ret = -ENOMEM; | 279 | ret = -ENOMEM; |
| 280 | break; | 280 | break; |
| 281 | } | ||
| 281 | 282 | ||
| 282 | ret = ttm_tt_set_user(bo->ttm, current, | 283 | ret = ttm_tt_set_user(bo->ttm, current, |
| 283 | bo->buffer_start, bo->num_pages); | 284 | bo->buffer_start, bo->num_pages); |
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index c70927ecda21..ceae52f45c39 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c | |||
| @@ -369,6 +369,7 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp) | |||
| 369 | #endif | 369 | #endif |
| 370 | return tmp; | 370 | return tmp; |
| 371 | } | 371 | } |
| 372 | EXPORT_SYMBOL(ttm_io_prot); | ||
| 372 | 373 | ||
| 373 | static int ttm_bo_ioremap(struct ttm_buffer_object *bo, | 374 | static int ttm_bo_ioremap(struct ttm_buffer_object *bo, |
| 374 | unsigned long bus_base, | 375 | unsigned long bus_base, |
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c new file mode 100644 index 000000000000..c285c2902d15 --- /dev/null +++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c | |||
| @@ -0,0 +1,117 @@ | |||
| 1 | /************************************************************************** | ||
| 2 | * | ||
| 3 | * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA | ||
| 4 | * All Rights Reserved. | ||
| 5 | * | ||
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 7 | * copy of this software and associated documentation files (the | ||
| 8 | * "Software"), to deal in the Software without restriction, including | ||
| 9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
| 10 | * distribute, sub license, and/or sell copies of the Software, and to | ||
| 11 | * permit persons to whom the Software is furnished to do so, subject to | ||
| 12 | * the following conditions: | ||
| 13 | * | ||
| 14 | * The above copyright notice and this permission notice (including the | ||
| 15 | * next paragraph) shall be included in all copies or substantial portions | ||
| 16 | * of the Software. | ||
| 17 | * | ||
| 18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
| 21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
| 22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
| 23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
| 24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
| 25 | * | ||
| 26 | **************************************************************************/ | ||
| 27 | |||
| 28 | #include "ttm/ttm_execbuf_util.h" | ||
| 29 | #include "ttm/ttm_bo_driver.h" | ||
| 30 | #include "ttm/ttm_placement.h" | ||
| 31 | #include <linux/wait.h> | ||
| 32 | #include <linux/sched.h> | ||
| 33 | #include <linux/module.h> | ||
| 34 | |||
| 35 | void ttm_eu_backoff_reservation(struct list_head *list) | ||
| 36 | { | ||
| 37 | struct ttm_validate_buffer *entry; | ||
| 38 | |||
| 39 | list_for_each_entry(entry, list, head) { | ||
| 40 | struct ttm_buffer_object *bo = entry->bo; | ||
| 41 | if (!entry->reserved) | ||
| 42 | continue; | ||
| 43 | |||
| 44 | entry->reserved = false; | ||
| 45 | ttm_bo_unreserve(bo); | ||
| 46 | } | ||
| 47 | } | ||
| 48 | EXPORT_SYMBOL(ttm_eu_backoff_reservation); | ||
| 49 | |||
| 50 | /* | ||
| 51 | * Reserve buffers for validation. | ||
| 52 | * | ||
| 53 | * If a buffer in the list is marked for CPU access, we back off and | ||
| 54 | * wait for that buffer to become free for GPU access. | ||
| 55 | * | ||
| 56 | * If a buffer is reserved for another validation, the validator with | ||
| 57 | * the highest validation sequence backs off and waits for that buffer | ||
| 58 | * to become unreserved. This prevents deadlocks when validating multiple | ||
| 59 | * buffers in different orders. | ||
| 60 | */ | ||
| 61 | |||
| 62 | int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq) | ||
| 63 | { | ||
| 64 | struct ttm_validate_buffer *entry; | ||
| 65 | int ret; | ||
| 66 | |||
| 67 | retry: | ||
| 68 | list_for_each_entry(entry, list, head) { | ||
| 69 | struct ttm_buffer_object *bo = entry->bo; | ||
| 70 | |||
| 71 | entry->reserved = false; | ||
| 72 | ret = ttm_bo_reserve(bo, true, false, true, val_seq); | ||
| 73 | if (ret != 0) { | ||
| 74 | ttm_eu_backoff_reservation(list); | ||
| 75 | if (ret == -EAGAIN) { | ||
| 76 | ret = ttm_bo_wait_unreserved(bo, true); | ||
| 77 | if (unlikely(ret != 0)) | ||
| 78 | return ret; | ||
| 79 | goto retry; | ||
| 80 | } else | ||
| 81 | return ret; | ||
| 82 | } | ||
| 83 | |||
| 84 | entry->reserved = true; | ||
| 85 | if (unlikely(atomic_read(&bo->cpu_writers) > 0)) { | ||
| 86 | ttm_eu_backoff_reservation(list); | ||
| 87 | ret = ttm_bo_wait_cpu(bo, false); | ||
| 88 | if (ret) | ||
| 89 | return ret; | ||
| 90 | goto retry; | ||
| 91 | } | ||
| 92 | } | ||
| 93 | return 0; | ||
| 94 | } | ||
| 95 | EXPORT_SYMBOL(ttm_eu_reserve_buffers); | ||
| 96 | |||
| 97 | void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj) | ||
| 98 | { | ||
| 99 | struct ttm_validate_buffer *entry; | ||
| 100 | |||
| 101 | list_for_each_entry(entry, list, head) { | ||
| 102 | struct ttm_buffer_object *bo = entry->bo; | ||
| 103 | struct ttm_bo_driver *driver = bo->bdev->driver; | ||
| 104 | void *old_sync_obj; | ||
| 105 | |||
| 106 | spin_lock(&bo->lock); | ||
| 107 | old_sync_obj = bo->sync_obj; | ||
| 108 | bo->sync_obj = driver->sync_obj_ref(sync_obj); | ||
| 109 | bo->sync_obj_arg = entry->new_sync_obj_arg; | ||
| 110 | spin_unlock(&bo->lock); | ||
| 111 | ttm_bo_unreserve(bo); | ||
| 112 | entry->reserved = false; | ||
| 113 | if (old_sync_obj) | ||
| 114 | driver->sync_obj_unref(&old_sync_obj); | ||
| 115 | } | ||
| 116 | } | ||
| 117 | EXPORT_SYMBOL(ttm_eu_fence_buffer_objects); | ||
diff --git a/drivers/gpu/drm/ttm/ttm_lock.c b/drivers/gpu/drm/ttm/ttm_lock.c new file mode 100644 index 000000000000..f619ebcaa4ec --- /dev/null +++ b/drivers/gpu/drm/ttm/ttm_lock.c | |||
| @@ -0,0 +1,311 @@ | |||
| 1 | /************************************************************************** | ||
| 2 | * | ||
| 3 | * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA | ||
| 4 | * All Rights Reserved. | ||
| 5 | * | ||
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 7 | * copy of this software and associated documentation files (the | ||
| 8 | * "Software"), to deal in the Software without restriction, including | ||
| 9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
| 10 | * distribute, sub license, and/or sell copies of the Software, and to | ||
| 11 | * permit persons to whom the Software is furnished to do so, subject to | ||
| 12 | * the following conditions: | ||
| 13 | * | ||
| 14 | * The above copyright notice and this permission notice (including the | ||
| 15 | * next paragraph) shall be included in all copies or substantial portions | ||
| 16 | * of the Software. | ||
| 17 | * | ||
| 18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
| 21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
| 22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
| 23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
| 24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
| 25 | * | ||
| 26 | **************************************************************************/ | ||
| 27 | /* | ||
| 28 | * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> | ||
| 29 | */ | ||
| 30 | |||
| 31 | #include "ttm/ttm_lock.h" | ||
| 32 | #include "ttm/ttm_module.h" | ||
| 33 | #include <asm/atomic.h> | ||
| 34 | #include <linux/errno.h> | ||
| 35 | #include <linux/wait.h> | ||
| 36 | #include <linux/sched.h> | ||
| 37 | #include <linux/module.h> | ||
| 38 | |||
| 39 | #define TTM_WRITE_LOCK_PENDING (1 << 0) | ||
| 40 | #define TTM_VT_LOCK_PENDING (1 << 1) | ||
| 41 | #define TTM_SUSPEND_LOCK_PENDING (1 << 2) | ||
| 42 | #define TTM_VT_LOCK (1 << 3) | ||
| 43 | #define TTM_SUSPEND_LOCK (1 << 4) | ||
| 44 | |||
| 45 | void ttm_lock_init(struct ttm_lock *lock) | ||
| 46 | { | ||
| 47 | spin_lock_init(&lock->lock); | ||
| 48 | init_waitqueue_head(&lock->queue); | ||
| 49 | lock->rw = 0; | ||
| 50 | lock->flags = 0; | ||
| 51 | lock->kill_takers = false; | ||
| 52 | lock->signal = SIGKILL; | ||
| 53 | } | ||
| 54 | EXPORT_SYMBOL(ttm_lock_init); | ||
| 55 | |||
| 56 | void ttm_read_unlock(struct ttm_lock *lock) | ||
| 57 | { | ||
| 58 | spin_lock(&lock->lock); | ||
| 59 | if (--lock->rw == 0) | ||
| 60 | wake_up_all(&lock->queue); | ||
| 61 | spin_unlock(&lock->lock); | ||
| 62 | } | ||
| 63 | EXPORT_SYMBOL(ttm_read_unlock); | ||
| 64 | |||
| 65 | static bool __ttm_read_lock(struct ttm_lock *lock) | ||
| 66 | { | ||
| 67 | bool locked = false; | ||
| 68 | |||
| 69 | spin_lock(&lock->lock); | ||
| 70 | if (unlikely(lock->kill_takers)) { | ||
| 71 | send_sig(lock->signal, current, 0); | ||
| 72 | spin_unlock(&lock->lock); | ||
| 73 | return false; | ||
| 74 | } | ||
| 75 | if (lock->rw >= 0 && lock->flags == 0) { | ||
| 76 | ++lock->rw; | ||
| 77 | locked = true; | ||
| 78 | } | ||
| 79 | spin_unlock(&lock->lock); | ||
| 80 | return locked; | ||
| 81 | } | ||
| 82 | |||
| 83 | int ttm_read_lock(struct ttm_lock *lock, bool interruptible) | ||
| 84 | { | ||
| 85 | int ret = 0; | ||
| 86 | |||
| 87 | if (interruptible) | ||
| 88 | ret = wait_event_interruptible(lock->queue, | ||
| 89 | __ttm_read_lock(lock)); | ||
| 90 | else | ||
| 91 | wait_event(lock->queue, __ttm_read_lock(lock)); | ||
| 92 | return ret; | ||
| 93 | } | ||
| 94 | EXPORT_SYMBOL(ttm_read_lock); | ||
| 95 | |||
| 96 | static bool __ttm_read_trylock(struct ttm_lock *lock, bool *locked) | ||
| 97 | { | ||
| 98 | bool block = true; | ||
| 99 | |||
| 100 | *locked = false; | ||
| 101 | |||
| 102 | spin_lock(&lock->lock); | ||
| 103 | if (unlikely(lock->kill_takers)) { | ||
| 104 | send_sig(lock->signal, current, 0); | ||
| 105 | spin_unlock(&lock->lock); | ||
| 106 | return false; | ||
| 107 | } | ||
| 108 | if (lock->rw >= 0 && lock->flags == 0) { | ||
| 109 | ++lock->rw; | ||
| 110 | block = false; | ||
| 111 | *locked = true; | ||
| 112 | } else if (lock->flags == 0) { | ||
| 113 | block = false; | ||
| 114 | } | ||
| 115 | spin_unlock(&lock->lock); | ||
| 116 | |||
| 117 | return !block; | ||
| 118 | } | ||
| 119 | |||
| 120 | int ttm_read_trylock(struct ttm_lock *lock, bool interruptible) | ||
| 121 | { | ||
| 122 | int ret = 0; | ||
| 123 | bool locked; | ||
| 124 | |||
| 125 | if (interruptible) | ||
| 126 | ret = wait_event_interruptible | ||
| 127 | (lock->queue, __ttm_read_trylock(lock, &locked)); | ||
| 128 | else | ||
| 129 | wait_event(lock->queue, __ttm_read_trylock(lock, &locked)); | ||
| 130 | |||
| 131 | if (unlikely(ret != 0)) { | ||
| 132 | BUG_ON(locked); | ||
| 133 | return ret; | ||
| 134 | } | ||
| 135 | |||
| 136 | return (locked) ? 0 : -EBUSY; | ||
| 137 | } | ||
| 138 | |||
| 139 | void ttm_write_unlock(struct ttm_lock *lock) | ||
| 140 | { | ||
| 141 | spin_lock(&lock->lock); | ||
| 142 | lock->rw = 0; | ||
| 143 | wake_up_all(&lock->queue); | ||
| 144 | spin_unlock(&lock->lock); | ||
| 145 | } | ||
| 146 | EXPORT_SYMBOL(ttm_write_unlock); | ||
| 147 | |||
| 148 | static bool __ttm_write_lock(struct ttm_lock *lock) | ||
| 149 | { | ||
| 150 | bool locked = false; | ||
| 151 | |||
| 152 | spin_lock(&lock->lock); | ||
| 153 | if (unlikely(lock->kill_takers)) { | ||
| 154 | send_sig(lock->signal, current, 0); | ||
| 155 | spin_unlock(&lock->lock); | ||
| 156 | return false; | ||
| 157 | } | ||
| 158 | if (lock->rw == 0 && ((lock->flags & ~TTM_WRITE_LOCK_PENDING) == 0)) { | ||
| 159 | lock->rw = -1; | ||
| 160 | lock->flags &= ~TTM_WRITE_LOCK_PENDING; | ||
| 161 | locked = true; | ||
| 162 | } else { | ||
| 163 | lock->flags |= TTM_WRITE_LOCK_PENDING; | ||
| 164 | } | ||
| 165 | spin_unlock(&lock->lock); | ||
| 166 | return locked; | ||
| 167 | } | ||
| 168 | |||
| 169 | int ttm_write_lock(struct ttm_lock *lock, bool interruptible) | ||
| 170 | { | ||
| 171 | int ret = 0; | ||
| 172 | |||
| 173 | if (interruptible) { | ||
| 174 | ret = wait_event_interruptible(lock->queue, | ||
| 175 | __ttm_write_lock(lock)); | ||
| 176 | if (unlikely(ret != 0)) { | ||
| 177 | spin_lock(&lock->lock); | ||
| 178 | lock->flags &= ~TTM_WRITE_LOCK_PENDING; | ||
| 179 | wake_up_all(&lock->queue); | ||
| 180 | spin_unlock(&lock->lock); | ||
| 181 | } | ||
| 182 | } else | ||
| 183 | wait_event(lock->queue, __ttm_read_lock(lock)); | ||
| 184 | |||
| 185 | return ret; | ||
| 186 | } | ||
| 187 | EXPORT_SYMBOL(ttm_write_lock); | ||
| 188 | |||
| 189 | void ttm_write_lock_downgrade(struct ttm_lock *lock) | ||
| 190 | { | ||
| 191 | spin_lock(&lock->lock); | ||
| 192 | lock->rw = 1; | ||
| 193 | wake_up_all(&lock->queue); | ||
| 194 | spin_unlock(&lock->lock); | ||
| 195 | } | ||
| 196 | |||
| 197 | static int __ttm_vt_unlock(struct ttm_lock *lock) | ||
| 198 | { | ||
| 199 | int ret = 0; | ||
| 200 | |||
| 201 | spin_lock(&lock->lock); | ||
| 202 | if (unlikely(!(lock->flags & TTM_VT_LOCK))) | ||
| 203 | ret = -EINVAL; | ||
| 204 | lock->flags &= ~TTM_VT_LOCK; | ||
| 205 | wake_up_all(&lock->queue); | ||
| 206 | spin_unlock(&lock->lock); | ||
| 207 | printk(KERN_INFO TTM_PFX "vt unlock.\n"); | ||
| 208 | |||
| 209 | return ret; | ||
| 210 | } | ||
| 211 | |||
| 212 | static void ttm_vt_lock_remove(struct ttm_base_object **p_base) | ||
| 213 | { | ||
| 214 | struct ttm_base_object *base = *p_base; | ||
| 215 | struct ttm_lock *lock = container_of(base, struct ttm_lock, base); | ||
| 216 | int ret; | ||
| 217 | |||
| 218 | *p_base = NULL; | ||
| 219 | ret = __ttm_vt_unlock(lock); | ||
| 220 | BUG_ON(ret != 0); | ||
| 221 | } | ||
| 222 | |||
| 223 | static bool __ttm_vt_lock(struct ttm_lock *lock) | ||
| 224 | { | ||
| 225 | bool locked = false; | ||
| 226 | |||
| 227 | spin_lock(&lock->lock); | ||
| 228 | if (lock->rw == 0) { | ||
| 229 | lock->flags &= ~TTM_VT_LOCK_PENDING; | ||
| 230 | lock->flags |= TTM_VT_LOCK; | ||
| 231 | locked = true; | ||
| 232 | } else { | ||
| 233 | lock->flags |= TTM_VT_LOCK_PENDING; | ||
| 234 | } | ||
| 235 | spin_unlock(&lock->lock); | ||
| 236 | return locked; | ||
| 237 | } | ||
| 238 | |||
| 239 | int ttm_vt_lock(struct ttm_lock *lock, | ||
| 240 | bool interruptible, | ||
| 241 | struct ttm_object_file *tfile) | ||
| 242 | { | ||
| 243 | int ret = 0; | ||
| 244 | |||
| 245 | if (interruptible) { | ||
| 246 | ret = wait_event_interruptible(lock->queue, | ||
| 247 | __ttm_vt_lock(lock)); | ||
| 248 | if (unlikely(ret != 0)) { | ||
| 249 | spin_lock(&lock->lock); | ||
| 250 | lock->flags &= ~TTM_VT_LOCK_PENDING; | ||
| 251 | wake_up_all(&lock->queue); | ||
| 252 | spin_unlock(&lock->lock); | ||
| 253 | return ret; | ||
| 254 | } | ||
| 255 | } else | ||
| 256 | wait_event(lock->queue, __ttm_vt_lock(lock)); | ||
| 257 | |||
| 258 | /* | ||
| 259 | * Add a base-object, the destructor of which will | ||
| 260 | * make sure the lock is released if the client dies | ||
| 261 | * while holding it. | ||
| 262 | */ | ||
| 263 | |||
| 264 | ret = ttm_base_object_init(tfile, &lock->base, false, | ||
| 265 | ttm_lock_type, &ttm_vt_lock_remove, NULL); | ||
| 266 | if (ret) | ||
| 267 | (void)__ttm_vt_unlock(lock); | ||
| 268 | else { | ||
| 269 | lock->vt_holder = tfile; | ||
| 270 | printk(KERN_INFO TTM_PFX "vt lock.\n"); | ||
| 271 | } | ||
| 272 | |||
| 273 | return ret; | ||
| 274 | } | ||
| 275 | EXPORT_SYMBOL(ttm_vt_lock); | ||
| 276 | |||
| 277 | int ttm_vt_unlock(struct ttm_lock *lock) | ||
| 278 | { | ||
| 279 | return ttm_ref_object_base_unref(lock->vt_holder, | ||
| 280 | lock->base.hash.key, TTM_REF_USAGE); | ||
| 281 | } | ||
| 282 | EXPORT_SYMBOL(ttm_vt_unlock); | ||
| 283 | |||
| 284 | void ttm_suspend_unlock(struct ttm_lock *lock) | ||
| 285 | { | ||
| 286 | spin_lock(&lock->lock); | ||
| 287 | lock->flags &= ~TTM_SUSPEND_LOCK; | ||
| 288 | wake_up_all(&lock->queue); | ||
| 289 | spin_unlock(&lock->lock); | ||
| 290 | } | ||
| 291 | |||
| 292 | static bool __ttm_suspend_lock(struct ttm_lock *lock) | ||
| 293 | { | ||
| 294 | bool locked = false; | ||
| 295 | |||
| 296 | spin_lock(&lock->lock); | ||
| 297 | if (lock->rw == 0) { | ||
| 298 | lock->flags &= ~TTM_SUSPEND_LOCK_PENDING; | ||
| 299 | lock->flags |= TTM_SUSPEND_LOCK; | ||
| 300 | locked = true; | ||
| 301 | } else { | ||
| 302 | lock->flags |= TTM_SUSPEND_LOCK_PENDING; | ||
| 303 | } | ||
| 304 | spin_unlock(&lock->lock); | ||
| 305 | return locked; | ||
| 306 | } | ||
| 307 | |||
| 308 | void ttm_suspend_lock(struct ttm_lock *lock) | ||
| 309 | { | ||
| 310 | wait_event(lock->queue, __ttm_suspend_lock(lock)); | ||
| 311 | } | ||
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c index 072c281a6bb5..8bfde5f40841 100644 --- a/drivers/gpu/drm/ttm/ttm_memory.c +++ b/drivers/gpu/drm/ttm/ttm_memory.c | |||
| @@ -274,16 +274,17 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob, | |||
| 274 | static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob, | 274 | static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob, |
| 275 | const struct sysinfo *si) | 275 | const struct sysinfo *si) |
| 276 | { | 276 | { |
| 277 | struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL); | 277 | struct ttm_mem_zone *zone; |
| 278 | uint64_t mem; | 278 | uint64_t mem; |
| 279 | int ret; | 279 | int ret; |
| 280 | 280 | ||
| 281 | if (unlikely(!zone)) | ||
| 282 | return -ENOMEM; | ||
| 283 | |||
| 284 | if (si->totalhigh == 0) | 281 | if (si->totalhigh == 0) |
| 285 | return 0; | 282 | return 0; |
| 286 | 283 | ||
| 284 | zone = kzalloc(sizeof(*zone), GFP_KERNEL); | ||
| 285 | if (unlikely(!zone)) | ||
| 286 | return -ENOMEM; | ||
| 287 | |||
| 287 | mem = si->totalram; | 288 | mem = si->totalram; |
| 288 | mem *= si->mem_unit; | 289 | mem *= si->mem_unit; |
| 289 | 290 | ||
| @@ -460,6 +461,7 @@ void ttm_mem_global_free(struct ttm_mem_global *glob, | |||
| 460 | { | 461 | { |
| 461 | return ttm_mem_global_free_zone(glob, NULL, amount); | 462 | return ttm_mem_global_free_zone(glob, NULL, amount); |
| 462 | } | 463 | } |
| 464 | EXPORT_SYMBOL(ttm_mem_global_free); | ||
| 463 | 465 | ||
| 464 | static int ttm_mem_global_reserve(struct ttm_mem_global *glob, | 466 | static int ttm_mem_global_reserve(struct ttm_mem_global *glob, |
| 465 | struct ttm_mem_zone *single_zone, | 467 | struct ttm_mem_zone *single_zone, |
| @@ -533,6 +535,7 @@ int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory, | |||
| 533 | return ttm_mem_global_alloc_zone(glob, NULL, memory, no_wait, | 535 | return ttm_mem_global_alloc_zone(glob, NULL, memory, no_wait, |
| 534 | interruptible); | 536 | interruptible); |
| 535 | } | 537 | } |
| 538 | EXPORT_SYMBOL(ttm_mem_global_alloc); | ||
| 536 | 539 | ||
| 537 | int ttm_mem_global_alloc_page(struct ttm_mem_global *glob, | 540 | int ttm_mem_global_alloc_page(struct ttm_mem_global *glob, |
| 538 | struct page *page, | 541 | struct page *page, |
| @@ -588,3 +591,4 @@ size_t ttm_round_pot(size_t size) | |||
| 588 | } | 591 | } |
| 589 | return 0; | 592 | return 0; |
| 590 | } | 593 | } |
| 594 | EXPORT_SYMBOL(ttm_round_pot); | ||
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c new file mode 100644 index 000000000000..1099abac824b --- /dev/null +++ b/drivers/gpu/drm/ttm/ttm_object.c | |||
| @@ -0,0 +1,452 @@ | |||
| 1 | /************************************************************************** | ||
| 2 | * | ||
| 3 | * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA | ||
| 4 | * All Rights Reserved. | ||
| 5 | * | ||
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 7 | * copy of this software and associated documentation files (the | ||
| 8 | * "Software"), to deal in the Software without restriction, including | ||
| 9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
| 10 | * distribute, sub license, and/or sell copies of the Software, and to | ||
| 11 | * permit persons to whom the Software is furnished to do so, subject to | ||
| 12 | * the following conditions: | ||
| 13 | * | ||
| 14 | * The above copyright notice and this permission notice (including the | ||
| 15 | * next paragraph) shall be included in all copies or substantial portions | ||
| 16 | * of the Software. | ||
| 17 | * | ||
| 18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
| 21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
| 22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
| 23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
| 24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
| 25 | * | ||
| 26 | **************************************************************************/ | ||
| 27 | /* | ||
| 28 | * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> | ||
| 29 | */ | ||
| 30 | /** @file ttm_ref_object.c | ||
| 31 | * | ||
| 32 | * Base- and reference object implementation for the various | ||
| 33 | * ttm objects. Implements reference counting, minimal security checks | ||
| 34 | * and release on file close. | ||
| 35 | */ | ||
| 36 | |||
| 37 | /** | ||
| 38 | * struct ttm_object_file | ||
| 39 | * | ||
| 40 | * @tdev: Pointer to the ttm_object_device. | ||
| 41 | * | ||
| 42 | * @lock: Lock that protects the ref_list list and the | ||
| 43 | * ref_hash hash tables. | ||
| 44 | * | ||
| 45 | * @ref_list: List of ttm_ref_objects to be destroyed at | ||
| 46 | * file release. | ||
| 47 | * | ||
| 48 | * @ref_hash: Hash tables of ref objects, one per ttm_ref_type, | ||
| 49 | * for fast lookup of ref objects given a base object. | ||
| 50 | */ | ||
| 51 | |||
| 52 | #include "ttm/ttm_object.h" | ||
| 53 | #include "ttm/ttm_module.h" | ||
| 54 | #include <linux/list.h> | ||
| 55 | #include <linux/spinlock.h> | ||
| 56 | #include <linux/slab.h> | ||
| 57 | #include <linux/module.h> | ||
| 58 | #include <asm/atomic.h> | ||
| 59 | |||
| 60 | struct ttm_object_file { | ||
| 61 | struct ttm_object_device *tdev; | ||
| 62 | rwlock_t lock; | ||
| 63 | struct list_head ref_list; | ||
| 64 | struct drm_open_hash ref_hash[TTM_REF_NUM]; | ||
| 65 | struct kref refcount; | ||
| 66 | }; | ||
| 67 | |||
| 68 | /** | ||
| 69 | * struct ttm_object_device | ||
| 70 | * | ||
| 71 | * @object_lock: lock that protects the object_hash hash table. | ||
| 72 | * | ||
| 73 | * @object_hash: hash table for fast lookup of object global names. | ||
| 74 | * | ||
| 75 | * @object_count: Per device object count. | ||
| 76 | * | ||
| 77 | * This is the per-device data structure needed for ttm object management. | ||
| 78 | */ | ||
| 79 | |||
| 80 | struct ttm_object_device { | ||
| 81 | rwlock_t object_lock; | ||
| 82 | struct drm_open_hash object_hash; | ||
| 83 | atomic_t object_count; | ||
| 84 | struct ttm_mem_global *mem_glob; | ||
| 85 | }; | ||
| 86 | |||
| 87 | /** | ||
| 88 | * struct ttm_ref_object | ||
| 89 | * | ||
| 90 | * @hash: Hash entry for the per-file object reference hash. | ||
| 91 | * | ||
| 92 | * @head: List entry for the per-file list of ref-objects. | ||
| 93 | * | ||
| 94 | * @kref: Ref count. | ||
| 95 | * | ||
| 96 | * @obj: Base object this ref object is referencing. | ||
| 97 | * | ||
| 98 | * @ref_type: Type of ref object. | ||
| 99 | * | ||
| 100 | * This is similar to an idr object, but it also has a hash table entry | ||
| 101 | * that allows lookup with a pointer to the referenced object as a key. In | ||
| 102 | * that way, one can easily detect whether a base object is referenced by | ||
| 103 | * a particular ttm_object_file. It also carries a ref count to avoid creating | ||
| 104 | * multiple ref objects if a ttm_object_file references the same base | ||
| 105 | * object more than once. | ||
| 106 | */ | ||
| 107 | |||
| 108 | struct ttm_ref_object { | ||
| 109 | struct drm_hash_item hash; | ||
| 110 | struct list_head head; | ||
| 111 | struct kref kref; | ||
| 112 | struct ttm_base_object *obj; | ||
| 113 | enum ttm_ref_type ref_type; | ||
| 114 | struct ttm_object_file *tfile; | ||
| 115 | }; | ||
| 116 | |||
| 117 | static inline struct ttm_object_file * | ||
| 118 | ttm_object_file_ref(struct ttm_object_file *tfile) | ||
| 119 | { | ||
| 120 | kref_get(&tfile->refcount); | ||
| 121 | return tfile; | ||
| 122 | } | ||
| 123 | |||
| 124 | static void ttm_object_file_destroy(struct kref *kref) | ||
| 125 | { | ||
| 126 | struct ttm_object_file *tfile = | ||
| 127 | container_of(kref, struct ttm_object_file, refcount); | ||
| 128 | |||
| 129 | kfree(tfile); | ||
| 130 | } | ||
| 131 | |||
| 132 | |||
| 133 | static inline void ttm_object_file_unref(struct ttm_object_file **p_tfile) | ||
| 134 | { | ||
| 135 | struct ttm_object_file *tfile = *p_tfile; | ||
| 136 | |||
| 137 | *p_tfile = NULL; | ||
| 138 | kref_put(&tfile->refcount, ttm_object_file_destroy); | ||
| 139 | } | ||
| 140 | |||
| 141 | |||
| 142 | int ttm_base_object_init(struct ttm_object_file *tfile, | ||
| 143 | struct ttm_base_object *base, | ||
| 144 | bool shareable, | ||
| 145 | enum ttm_object_type object_type, | ||
| 146 | void (*refcount_release) (struct ttm_base_object **), | ||
| 147 | void (*ref_obj_release) (struct ttm_base_object *, | ||
| 148 | enum ttm_ref_type ref_type)) | ||
| 149 | { | ||
| 150 | struct ttm_object_device *tdev = tfile->tdev; | ||
| 151 | int ret; | ||
| 152 | |||
| 153 | base->shareable = shareable; | ||
| 154 | base->tfile = ttm_object_file_ref(tfile); | ||
| 155 | base->refcount_release = refcount_release; | ||
| 156 | base->ref_obj_release = ref_obj_release; | ||
| 157 | base->object_type = object_type; | ||
| 158 | write_lock(&tdev->object_lock); | ||
| 159 | kref_init(&base->refcount); | ||
| 160 | ret = drm_ht_just_insert_please(&tdev->object_hash, | ||
| 161 | &base->hash, | ||
| 162 | (unsigned long)base, 31, 0, 0); | ||
| 163 | write_unlock(&tdev->object_lock); | ||
| 164 | if (unlikely(ret != 0)) | ||
| 165 | goto out_err0; | ||
| 166 | |||
| 167 | ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL); | ||
| 168 | if (unlikely(ret != 0)) | ||
| 169 | goto out_err1; | ||
| 170 | |||
| 171 | ttm_base_object_unref(&base); | ||
| 172 | |||
| 173 | return 0; | ||
| 174 | out_err1: | ||
| 175 | (void)drm_ht_remove_item(&tdev->object_hash, &base->hash); | ||
| 176 | out_err0: | ||
| 177 | return ret; | ||
| 178 | } | ||
| 179 | EXPORT_SYMBOL(ttm_base_object_init); | ||
| 180 | |||
| 181 | static void ttm_release_base(struct kref *kref) | ||
| 182 | { | ||
| 183 | struct ttm_base_object *base = | ||
| 184 | container_of(kref, struct ttm_base_object, refcount); | ||
| 185 | struct ttm_object_device *tdev = base->tfile->tdev; | ||
| 186 | |||
| 187 | (void)drm_ht_remove_item(&tdev->object_hash, &base->hash); | ||
| 188 | write_unlock(&tdev->object_lock); | ||
| 189 | if (base->refcount_release) { | ||
| 190 | ttm_object_file_unref(&base->tfile); | ||
| 191 | base->refcount_release(&base); | ||
| 192 | } | ||
| 193 | write_lock(&tdev->object_lock); | ||
| 194 | } | ||
| 195 | |||
| 196 | void ttm_base_object_unref(struct ttm_base_object **p_base) | ||
| 197 | { | ||
| 198 | struct ttm_base_object *base = *p_base; | ||
| 199 | struct ttm_object_device *tdev = base->tfile->tdev; | ||
| 200 | |||
| 201 | *p_base = NULL; | ||
| 202 | |||
| 203 | /* | ||
| 204 | * Need to take the lock here to avoid racing with | ||
| 205 | * users trying to look up the object. | ||
| 206 | */ | ||
| 207 | |||
| 208 | write_lock(&tdev->object_lock); | ||
| 209 | (void)kref_put(&base->refcount, &ttm_release_base); | ||
| 210 | write_unlock(&tdev->object_lock); | ||
| 211 | } | ||
| 212 | EXPORT_SYMBOL(ttm_base_object_unref); | ||
| 213 | |||
| 214 | struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile, | ||
| 215 | uint32_t key) | ||
| 216 | { | ||
| 217 | struct ttm_object_device *tdev = tfile->tdev; | ||
| 218 | struct ttm_base_object *base; | ||
| 219 | struct drm_hash_item *hash; | ||
| 220 | int ret; | ||
| 221 | |||
| 222 | read_lock(&tdev->object_lock); | ||
| 223 | ret = drm_ht_find_item(&tdev->object_hash, key, &hash); | ||
| 224 | |||
| 225 | if (likely(ret == 0)) { | ||
| 226 | base = drm_hash_entry(hash, struct ttm_base_object, hash); | ||
| 227 | kref_get(&base->refcount); | ||
| 228 | } | ||
| 229 | read_unlock(&tdev->object_lock); | ||
| 230 | |||
| 231 | if (unlikely(ret != 0)) | ||
| 232 | return NULL; | ||
| 233 | |||
| 234 | if (tfile != base->tfile && !base->shareable) { | ||
| 235 | printk(KERN_ERR TTM_PFX | ||
| 236 | "Attempted access of non-shareable object.\n"); | ||
| 237 | ttm_base_object_unref(&base); | ||
| 238 | return NULL; | ||
| 239 | } | ||
| 240 | |||
| 241 | return base; | ||
| 242 | } | ||
| 243 | EXPORT_SYMBOL(ttm_base_object_lookup); | ||
| 244 | |||
| 245 | int ttm_ref_object_add(struct ttm_object_file *tfile, | ||
| 246 | struct ttm_base_object *base, | ||
| 247 | enum ttm_ref_type ref_type, bool *existed) | ||
| 248 | { | ||
| 249 | struct drm_open_hash *ht = &tfile->ref_hash[ref_type]; | ||
| 250 | struct ttm_ref_object *ref; | ||
| 251 | struct drm_hash_item *hash; | ||
| 252 | struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob; | ||
| 253 | int ret = -EINVAL; | ||
| 254 | |||
| 255 | if (existed != NULL) | ||
| 256 | *existed = true; | ||
| 257 | |||
| 258 | while (ret == -EINVAL) { | ||
| 259 | read_lock(&tfile->lock); | ||
| 260 | ret = drm_ht_find_item(ht, base->hash.key, &hash); | ||
| 261 | |||
| 262 | if (ret == 0) { | ||
| 263 | ref = drm_hash_entry(hash, struct ttm_ref_object, hash); | ||
| 264 | kref_get(&ref->kref); | ||
| 265 | read_unlock(&tfile->lock); | ||
| 266 | break; | ||
| 267 | } | ||
| 268 | |||
| 269 | read_unlock(&tfile->lock); | ||
| 270 | ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref), | ||
| 271 | false, false); | ||
| 272 | if (unlikely(ret != 0)) | ||
| 273 | return ret; | ||
| 274 | ref = kmalloc(sizeof(*ref), GFP_KERNEL); | ||
| 275 | if (unlikely(ref == NULL)) { | ||
| 276 | ttm_mem_global_free(mem_glob, sizeof(*ref)); | ||
| 277 | return -ENOMEM; | ||
| 278 | } | ||
| 279 | |||
| 280 | ref->hash.key = base->hash.key; | ||
| 281 | ref->obj = base; | ||
| 282 | ref->tfile = tfile; | ||
| 283 | ref->ref_type = ref_type; | ||
| 284 | kref_init(&ref->kref); | ||
| 285 | |||
| 286 | write_lock(&tfile->lock); | ||
| 287 | ret = drm_ht_insert_item(ht, &ref->hash); | ||
| 288 | |||
| 289 | if (likely(ret == 0)) { | ||
| 290 | list_add_tail(&ref->head, &tfile->ref_list); | ||
| 291 | kref_get(&base->refcount); | ||
| 292 | write_unlock(&tfile->lock); | ||
| 293 | if (existed != NULL) | ||
| 294 | *existed = false; | ||
| 295 | break; | ||
| 296 | } | ||
| 297 | |||
| 298 | write_unlock(&tfile->lock); | ||
| 299 | BUG_ON(ret != -EINVAL); | ||
| 300 | |||
| 301 | ttm_mem_global_free(mem_glob, sizeof(*ref)); | ||
| 302 | kfree(ref); | ||
| 303 | } | ||
| 304 | |||
| 305 | return ret; | ||
| 306 | } | ||
| 307 | EXPORT_SYMBOL(ttm_ref_object_add); | ||
| 308 | |||
| 309 | static void ttm_ref_object_release(struct kref *kref) | ||
| 310 | { | ||
| 311 | struct ttm_ref_object *ref = | ||
| 312 | container_of(kref, struct ttm_ref_object, kref); | ||
| 313 | struct ttm_base_object *base = ref->obj; | ||
| 314 | struct ttm_object_file *tfile = ref->tfile; | ||
| 315 | struct drm_open_hash *ht; | ||
| 316 | struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob; | ||
| 317 | |||
| 318 | ht = &tfile->ref_hash[ref->ref_type]; | ||
| 319 | (void)drm_ht_remove_item(ht, &ref->hash); | ||
| 320 | list_del(&ref->head); | ||
| 321 | write_unlock(&tfile->lock); | ||
| 322 | |||
| 323 | if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release) | ||
| 324 | base->ref_obj_release(base, ref->ref_type); | ||
| 325 | |||
| 326 | ttm_base_object_unref(&ref->obj); | ||
| 327 | ttm_mem_global_free(mem_glob, sizeof(*ref)); | ||
| 328 | kfree(ref); | ||
| 329 | write_lock(&tfile->lock); | ||
| 330 | } | ||
| 331 | |||
| 332 | int ttm_ref_object_base_unref(struct ttm_object_file *tfile, | ||
| 333 | unsigned long key, enum ttm_ref_type ref_type) | ||
| 334 | { | ||
| 335 | struct drm_open_hash *ht = &tfile->ref_hash[ref_type]; | ||
| 336 | struct ttm_ref_object *ref; | ||
| 337 | struct drm_hash_item *hash; | ||
| 338 | int ret; | ||
| 339 | |||
| 340 | write_lock(&tfile->lock); | ||
| 341 | ret = drm_ht_find_item(ht, key, &hash); | ||
| 342 | if (unlikely(ret != 0)) { | ||
| 343 | write_unlock(&tfile->lock); | ||
| 344 | return -EINVAL; | ||
| 345 | } | ||
| 346 | ref = drm_hash_entry(hash, struct ttm_ref_object, hash); | ||
| 347 | kref_put(&ref->kref, ttm_ref_object_release); | ||
| 348 | write_unlock(&tfile->lock); | ||
| 349 | return 0; | ||
| 350 | } | ||
| 351 | EXPORT_SYMBOL(ttm_ref_object_base_unref); | ||
| 352 | |||
| 353 | void ttm_object_file_release(struct ttm_object_file **p_tfile) | ||
| 354 | { | ||
| 355 | struct ttm_ref_object *ref; | ||
| 356 | struct list_head *list; | ||
| 357 | unsigned int i; | ||
| 358 | struct ttm_object_file *tfile = *p_tfile; | ||
| 359 | |||
| 360 | *p_tfile = NULL; | ||
| 361 | write_lock(&tfile->lock); | ||
| 362 | |||
| 363 | /* | ||
| 364 | * Since we release the lock within the loop, we have to | ||
| 365 | * restart it from the beginning each time. | ||
| 366 | */ | ||
| 367 | |||
| 368 | while (!list_empty(&tfile->ref_list)) { | ||
| 369 | list = tfile->ref_list.next; | ||
| 370 | ref = list_entry(list, struct ttm_ref_object, head); | ||
| 371 | ttm_ref_object_release(&ref->kref); | ||
| 372 | } | ||
| 373 | |||
| 374 | for (i = 0; i < TTM_REF_NUM; ++i) | ||
| 375 | drm_ht_remove(&tfile->ref_hash[i]); | ||
| 376 | |||
| 377 | write_unlock(&tfile->lock); | ||
| 378 | ttm_object_file_unref(&tfile); | ||
| 379 | } | ||
| 380 | EXPORT_SYMBOL(ttm_object_file_release); | ||
| 381 | |||
| 382 | struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev, | ||
| 383 | unsigned int hash_order) | ||
| 384 | { | ||
| 385 | struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL); | ||
| 386 | unsigned int i; | ||
| 387 | unsigned int j = 0; | ||
| 388 | int ret; | ||
| 389 | |||
| 390 | if (unlikely(tfile == NULL)) | ||
| 391 | return NULL; | ||
| 392 | |||
| 393 | rwlock_init(&tfile->lock); | ||
| 394 | tfile->tdev = tdev; | ||
| 395 | kref_init(&tfile->refcount); | ||
| 396 | INIT_LIST_HEAD(&tfile->ref_list); | ||
| 397 | |||
| 398 | for (i = 0; i < TTM_REF_NUM; ++i) { | ||
| 399 | ret = drm_ht_create(&tfile->ref_hash[i], hash_order); | ||
| 400 | if (ret) { | ||
| 401 | j = i; | ||
| 402 | goto out_err; | ||
| 403 | } | ||
| 404 | } | ||
| 405 | |||
| 406 | return tfile; | ||
| 407 | out_err: | ||
| 408 | for (i = 0; i < j; ++i) | ||
| 409 | drm_ht_remove(&tfile->ref_hash[i]); | ||
| 410 | |||
| 411 | kfree(tfile); | ||
| 412 | |||
| 413 | return NULL; | ||
| 414 | } | ||
| 415 | EXPORT_SYMBOL(ttm_object_file_init); | ||
| 416 | |||
| 417 | struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global | ||
| 418 | *mem_glob, | ||
| 419 | unsigned int hash_order) | ||
| 420 | { | ||
| 421 | struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL); | ||
| 422 | int ret; | ||
| 423 | |||
| 424 | if (unlikely(tdev == NULL)) | ||
| 425 | return NULL; | ||
| 426 | |||
| 427 | tdev->mem_glob = mem_glob; | ||
| 428 | rwlock_init(&tdev->object_lock); | ||
| 429 | atomic_set(&tdev->object_count, 0); | ||
| 430 | ret = drm_ht_create(&tdev->object_hash, hash_order); | ||
| 431 | |||
| 432 | if (likely(ret == 0)) | ||
| 433 | return tdev; | ||
| 434 | |||
| 435 | kfree(tdev); | ||
| 436 | return NULL; | ||
| 437 | } | ||
| 438 | EXPORT_SYMBOL(ttm_object_device_init); | ||
| 439 | |||
| 440 | void ttm_object_device_release(struct ttm_object_device **p_tdev) | ||
| 441 | { | ||
| 442 | struct ttm_object_device *tdev = *p_tdev; | ||
| 443 | |||
| 444 | *p_tdev = NULL; | ||
| 445 | |||
| 446 | write_lock(&tdev->object_lock); | ||
| 447 | drm_ht_remove(&tdev->object_hash); | ||
| 448 | write_unlock(&tdev->object_lock); | ||
| 449 | |||
| 450 | kfree(tdev); | ||
| 451 | } | ||
| 452 | EXPORT_SYMBOL(ttm_object_device_release); | ||
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index 7bcb89f39ce8..9c2b1cc5dba5 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c | |||
| @@ -192,6 +192,7 @@ int ttm_tt_populate(struct ttm_tt *ttm) | |||
| 192 | ttm->state = tt_unbound; | 192 | ttm->state = tt_unbound; |
| 193 | return 0; | 193 | return 0; |
| 194 | } | 194 | } |
| 195 | EXPORT_SYMBOL(ttm_tt_populate); | ||
| 195 | 196 | ||
| 196 | #ifdef CONFIG_X86 | 197 | #ifdef CONFIG_X86 |
| 197 | static inline int ttm_tt_set_page_caching(struct page *p, | 198 | static inline int ttm_tt_set_page_caching(struct page *p, |
diff --git a/include/drm/drm.h b/include/drm/drm.h index 7cb50bdde46d..43a35b092f04 100644 --- a/include/drm/drm.h +++ b/include/drm/drm.h | |||
| @@ -36,17 +36,27 @@ | |||
| 36 | #ifndef _DRM_H_ | 36 | #ifndef _DRM_H_ |
| 37 | #define _DRM_H_ | 37 | #define _DRM_H_ |
| 38 | 38 | ||
| 39 | #if defined(__linux__) | ||
| 40 | |||
| 39 | #include <linux/types.h> | 41 | #include <linux/types.h> |
| 40 | #include <asm/ioctl.h> /* For _IO* macros */ | 42 | #include <asm/ioctl.h> |
| 41 | #define DRM_IOCTL_NR(n) _IOC_NR(n) | 43 | typedef unsigned int drm_handle_t; |
| 42 | #define DRM_IOC_VOID _IOC_NONE | ||
| 43 | #define DRM_IOC_READ _IOC_READ | ||
| 44 | #define DRM_IOC_WRITE _IOC_WRITE | ||
| 45 | #define DRM_IOC_READWRITE _IOC_READ|_IOC_WRITE | ||
| 46 | #define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size) | ||
| 47 | 44 | ||
| 48 | #define DRM_MAJOR 226 | 45 | #else /* One of the BSDs */ |
| 49 | #define DRM_MAX_MINOR 15 | 46 | |
| 47 | #include <sys/ioccom.h> | ||
| 48 | #include <sys/types.h> | ||
| 49 | typedef int8_t __s8; | ||
| 50 | typedef uint8_t __u8; | ||
| 51 | typedef int16_t __s16; | ||
| 52 | typedef uint16_t __u16; | ||
| 53 | typedef int32_t __s32; | ||
| 54 | typedef uint32_t __u32; | ||
| 55 | typedef int64_t __s64; | ||
| 56 | typedef uint64_t __u64; | ||
| 57 | typedef unsigned long drm_handle_t; | ||
| 58 | |||
| 59 | #endif | ||
| 50 | 60 | ||
| 51 | #define DRM_NAME "drm" /**< Name in kernel, /dev, and /proc */ | 61 | #define DRM_NAME "drm" /**< Name in kernel, /dev, and /proc */ |
| 52 | #define DRM_MIN_ORDER 5 /**< At least 2^5 bytes = 32 bytes */ | 62 | #define DRM_MIN_ORDER 5 /**< At least 2^5 bytes = 32 bytes */ |
| @@ -59,7 +69,6 @@ | |||
| 59 | #define _DRM_LOCK_IS_CONT(lock) ((lock) & _DRM_LOCK_CONT) | 69 | #define _DRM_LOCK_IS_CONT(lock) ((lock) & _DRM_LOCK_CONT) |
| 60 | #define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT)) | 70 | #define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT)) |
| 61 | 71 | ||
| 62 | typedef unsigned int drm_handle_t; | ||
| 63 | typedef unsigned int drm_context_t; | 72 | typedef unsigned int drm_context_t; |
| 64 | typedef unsigned int drm_drawable_t; | 73 | typedef unsigned int drm_drawable_t; |
| 65 | typedef unsigned int drm_magic_t; | 74 | typedef unsigned int drm_magic_t; |
| @@ -454,6 +463,7 @@ struct drm_irq_busid { | |||
| 454 | enum drm_vblank_seq_type { | 463 | enum drm_vblank_seq_type { |
| 455 | _DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */ | 464 | _DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */ |
| 456 | _DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */ | 465 | _DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */ |
| 466 | _DRM_VBLANK_EVENT = 0x4000000, /**< Send event instead of blocking */ | ||
| 457 | _DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */ | 467 | _DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */ |
| 458 | _DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */ | 468 | _DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */ |
| 459 | _DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */ | 469 | _DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */ |
| @@ -461,8 +471,8 @@ enum drm_vblank_seq_type { | |||
| 461 | }; | 471 | }; |
| 462 | 472 | ||
| 463 | #define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE) | 473 | #define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE) |
| 464 | #define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_SIGNAL | _DRM_VBLANK_SECONDARY | \ | 474 | #define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_EVENT | _DRM_VBLANK_SIGNAL | \ |
| 465 | _DRM_VBLANK_NEXTONMISS) | 475 | _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS) |
| 466 | 476 | ||
| 467 | struct drm_wait_vblank_request { | 477 | struct drm_wait_vblank_request { |
| 468 | enum drm_vblank_seq_type type; | 478 | enum drm_vblank_seq_type type; |
| @@ -686,6 +696,8 @@ struct drm_gem_open { | |||
| 686 | #define DRM_IOCTL_MODE_GETFB DRM_IOWR(0xAD, struct drm_mode_fb_cmd) | 696 | #define DRM_IOCTL_MODE_GETFB DRM_IOWR(0xAD, struct drm_mode_fb_cmd) |
| 687 | #define DRM_IOCTL_MODE_ADDFB DRM_IOWR(0xAE, struct drm_mode_fb_cmd) | 697 | #define DRM_IOCTL_MODE_ADDFB DRM_IOWR(0xAE, struct drm_mode_fb_cmd) |
| 688 | #define DRM_IOCTL_MODE_RMFB DRM_IOWR(0xAF, unsigned int) | 698 | #define DRM_IOCTL_MODE_RMFB DRM_IOWR(0xAF, unsigned int) |
| 699 | #define DRM_IOCTL_MODE_PAGE_FLIP DRM_IOWR(0xB0, struct drm_mode_crtc_page_flip) | ||
| 700 | #define DRM_IOCTL_MODE_DIRTYFB DRM_IOWR(0xB1, struct drm_mode_fb_dirty_cmd) | ||
| 689 | 701 | ||
| 690 | /** | 702 | /** |
| 691 | * Device specific ioctls should only be in their respective headers | 703 | * Device specific ioctls should only be in their respective headers |
| @@ -698,6 +710,34 @@ struct drm_gem_open { | |||
| 698 | #define DRM_COMMAND_BASE 0x40 | 710 | #define DRM_COMMAND_BASE 0x40 |
| 699 | #define DRM_COMMAND_END 0xA0 | 711 | #define DRM_COMMAND_END 0xA0 |
| 700 | 712 | ||
| 713 | /** | ||
| 714 | * Header for events written back to userspace on the drm fd. The | ||
| 715 | * type defines the type of event, the length specifies the total | ||
| 716 | * length of the event (including the header), and user_data is | ||
| 717 | * typically a 64 bit value passed with the ioctl that triggered the | ||
| 718 | * event. A read on the drm fd will always only return complete | ||
| 719 | * events, that is, if for example the read buffer is 100 bytes, and | ||
| 720 | * there are two 64 byte events pending, only one will be returned. | ||
| 721 | * | ||
| 722 | * Event types 0 - 0x7fffffff are generic drm events, 0x80000000 and | ||
| 723 | * up are chipset specific. | ||
| 724 | */ | ||
| 725 | struct drm_event { | ||
| 726 | __u32 type; | ||
| 727 | __u32 length; | ||
| 728 | }; | ||
| 729 | |||
| 730 | #define DRM_EVENT_VBLANK 0x01 | ||
| 731 | |||
| 732 | struct drm_event_vblank { | ||
| 733 | struct drm_event base; | ||
| 734 | __u64 user_data; | ||
| 735 | __u32 tv_sec; | ||
| 736 | __u32 tv_usec; | ||
| 737 | __u32 sequence; | ||
| 738 | __u32 reserved; | ||
| 739 | }; | ||
| 740 | |||
| 701 | /* typedef area */ | 741 | /* typedef area */ |
| 702 | #ifndef __KERNEL__ | 742 | #ifndef __KERNEL__ |
| 703 | typedef struct drm_clip_rect drm_clip_rect_t; | 743 | typedef struct drm_clip_rect drm_clip_rect_t; |
diff --git a/include/drm/drmP.h b/include/drm/drmP.h index c8e64bbadbcf..db56a6add5de 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h | |||
| @@ -245,16 +245,6 @@ extern void drm_ut_debug_printk(unsigned int request_level, | |||
| 245 | 245 | ||
| 246 | #endif | 246 | #endif |
| 247 | 247 | ||
| 248 | #define DRM_PROC_LIMIT (PAGE_SIZE-80) | ||
| 249 | |||
| 250 | #define DRM_PROC_PRINT(fmt, arg...) \ | ||
| 251 | len += sprintf(&buf[len], fmt , ##arg); \ | ||
| 252 | if (len > DRM_PROC_LIMIT) { *eof = 1; return len - offset; } | ||
| 253 | |||
| 254 | #define DRM_PROC_PRINT_RET(ret, fmt, arg...) \ | ||
| 255 | len += sprintf(&buf[len], fmt , ##arg); \ | ||
| 256 | if (len > DRM_PROC_LIMIT) { ret; *eof = 1; return len - offset; } | ||
| 257 | |||
| 258 | /*@}*/ | 248 | /*@}*/ |
| 259 | 249 | ||
| 260 | /***********************************************************************/ | 250 | /***********************************************************************/ |
| @@ -265,19 +255,8 @@ extern void drm_ut_debug_printk(unsigned int request_level, | |||
| 265 | 255 | ||
| 266 | #define DRM_LEFTCOUNT(x) (((x)->rp + (x)->count - (x)->wp) % ((x)->count + 1)) | 256 | #define DRM_LEFTCOUNT(x) (((x)->rp + (x)->count - (x)->wp) % ((x)->count + 1)) |
| 267 | #define DRM_BUFCOUNT(x) ((x)->count - DRM_LEFTCOUNT(x)) | 257 | #define DRM_BUFCOUNT(x) ((x)->count - DRM_LEFTCOUNT(x)) |
| 268 | #define DRM_WAITCOUNT(dev,idx) DRM_BUFCOUNT(&dev->queuelist[idx]->waitlist) | ||
| 269 | 258 | ||
| 270 | #define DRM_IF_VERSION(maj, min) (maj << 16 | min) | 259 | #define DRM_IF_VERSION(maj, min) (maj << 16 | min) |
| 271 | /** | ||
| 272 | * Get the private SAREA mapping. | ||
| 273 | * | ||
| 274 | * \param _dev DRM device. | ||
| 275 | * \param _ctx context number. | ||
| 276 | * \param _map output mapping. | ||
| 277 | */ | ||
| 278 | #define DRM_GET_PRIV_SAREA(_dev, _ctx, _map) do { \ | ||
| 279 | (_map) = (_dev)->context_sareas[_ctx]; \ | ||
| 280 | } while(0) | ||
| 281 | 260 | ||
| 282 | /** | 261 | /** |
| 283 | * Test that the hardware lock is held by the caller, returning otherwise. | 262 | * Test that the hardware lock is held by the caller, returning otherwise. |
| @@ -297,18 +276,6 @@ do { \ | |||
| 297 | } while (0) | 276 | } while (0) |
| 298 | 277 | ||
| 299 | /** | 278 | /** |
| 300 | * Copy and IOCTL return string to user space | ||
| 301 | */ | ||
| 302 | #define DRM_COPY( name, value ) \ | ||
| 303 | len = strlen( value ); \ | ||
| 304 | if ( len > name##_len ) len = name##_len; \ | ||
| 305 | name##_len = strlen( value ); \ | ||
| 306 | if ( len && name ) { \ | ||
| 307 | if ( copy_to_user( name, value, len ) ) \ | ||
| 308 | return -EFAULT; \ | ||
| 309 | } | ||
| 310 | |||
| 311 | /** | ||
| 312 | * Ioctl function type. | 279 | * Ioctl function type. |
| 313 | * | 280 | * |
| 314 | * \param inode device inode. | 281 | * \param inode device inode. |
| @@ -322,6 +289,9 @@ typedef int drm_ioctl_t(struct drm_device *dev, void *data, | |||
| 322 | typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd, | 289 | typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd, |
| 323 | unsigned long arg); | 290 | unsigned long arg); |
| 324 | 291 | ||
| 292 | #define DRM_IOCTL_NR(n) _IOC_NR(n) | ||
| 293 | #define DRM_MAJOR 226 | ||
| 294 | |||
| 325 | #define DRM_AUTH 0x1 | 295 | #define DRM_AUTH 0x1 |
| 326 | #define DRM_MASTER 0x2 | 296 | #define DRM_MASTER 0x2 |
| 327 | #define DRM_ROOT_ONLY 0x4 | 297 | #define DRM_ROOT_ONLY 0x4 |
| @@ -426,6 +396,14 @@ struct drm_buf_entry { | |||
| 426 | struct drm_freelist freelist; | 396 | struct drm_freelist freelist; |
| 427 | }; | 397 | }; |
| 428 | 398 | ||
| 399 | /* Event queued up for userspace to read */ | ||
| 400 | struct drm_pending_event { | ||
| 401 | struct drm_event *event; | ||
| 402 | struct list_head link; | ||
| 403 | struct drm_file *file_priv; | ||
| 404 | void (*destroy)(struct drm_pending_event *event); | ||
| 405 | }; | ||
| 406 | |||
| 429 | /** File private data */ | 407 | /** File private data */ |
| 430 | struct drm_file { | 408 | struct drm_file { |
| 431 | int authenticated; | 409 | int authenticated; |
| @@ -449,6 +427,10 @@ struct drm_file { | |||
| 449 | struct drm_master *master; /* master this node is currently associated with | 427 | struct drm_master *master; /* master this node is currently associated with |
| 450 | N.B. not always minor->master */ | 428 | N.B. not always minor->master */ |
| 451 | struct list_head fbs; | 429 | struct list_head fbs; |
| 430 | |||
| 431 | wait_queue_head_t event_wait; | ||
| 432 | struct list_head event_list; | ||
| 433 | int event_space; | ||
| 452 | }; | 434 | }; |
| 453 | 435 | ||
| 454 | /** Wait queue */ | 436 | /** Wait queue */ |
| @@ -795,6 +777,15 @@ struct drm_driver { | |||
| 795 | /* Master routines */ | 777 | /* Master routines */ |
| 796 | int (*master_create)(struct drm_device *dev, struct drm_master *master); | 778 | int (*master_create)(struct drm_device *dev, struct drm_master *master); |
| 797 | void (*master_destroy)(struct drm_device *dev, struct drm_master *master); | 779 | void (*master_destroy)(struct drm_device *dev, struct drm_master *master); |
| 780 | /** | ||
| 781 | * master_set is called whenever the minor master is set. | ||
| 782 | * master_drop is called whenever the minor master is dropped. | ||
| 783 | */ | ||
| 784 | |||
| 785 | int (*master_set)(struct drm_device *dev, struct drm_file *file_priv, | ||
| 786 | bool from_open); | ||
| 787 | void (*master_drop)(struct drm_device *dev, struct drm_file *file_priv, | ||
| 788 | bool from_release); | ||
| 798 | 789 | ||
| 799 | int (*proc_init)(struct drm_minor *minor); | 790 | int (*proc_init)(struct drm_minor *minor); |
| 800 | void (*proc_cleanup)(struct drm_minor *minor); | 791 | void (*proc_cleanup)(struct drm_minor *minor); |
| @@ -900,6 +891,12 @@ struct drm_minor { | |||
| 900 | struct drm_mode_group mode_group; | 891 | struct drm_mode_group mode_group; |
| 901 | }; | 892 | }; |
| 902 | 893 | ||
| 894 | struct drm_pending_vblank_event { | ||
| 895 | struct drm_pending_event base; | ||
| 896 | int pipe; | ||
| 897 | struct drm_event_vblank event; | ||
| 898 | }; | ||
| 899 | |||
| 903 | /** | 900 | /** |
| 904 | * DRM device structure. This structure represent a complete card that | 901 | * DRM device structure. This structure represent a complete card that |
| 905 | * may contain multiple heads. | 902 | * may contain multiple heads. |
| @@ -999,6 +996,12 @@ struct drm_device { | |||
| 999 | 996 | ||
| 1000 | u32 max_vblank_count; /**< size of vblank counter register */ | 997 | u32 max_vblank_count; /**< size of vblank counter register */ |
| 1001 | 998 | ||
| 999 | /** | ||
| 1000 | * List of events | ||
| 1001 | */ | ||
| 1002 | struct list_head vblank_event_list; | ||
| 1003 | spinlock_t event_lock; | ||
| 1004 | |||
| 1002 | /*@} */ | 1005 | /*@} */ |
| 1003 | cycles_t ctx_start; | 1006 | cycles_t ctx_start; |
| 1004 | cycles_t lck_start; | 1007 | cycles_t lck_start; |
| @@ -1135,6 +1138,8 @@ extern int drm_lastclose(struct drm_device *dev); | |||
| 1135 | extern int drm_open(struct inode *inode, struct file *filp); | 1138 | extern int drm_open(struct inode *inode, struct file *filp); |
| 1136 | extern int drm_stub_open(struct inode *inode, struct file *filp); | 1139 | extern int drm_stub_open(struct inode *inode, struct file *filp); |
| 1137 | extern int drm_fasync(int fd, struct file *filp, int on); | 1140 | extern int drm_fasync(int fd, struct file *filp, int on); |
| 1141 | extern ssize_t drm_read(struct file *filp, char __user *buffer, | ||
| 1142 | size_t count, loff_t *offset); | ||
| 1138 | extern int drm_release(struct inode *inode, struct file *filp); | 1143 | extern int drm_release(struct inode *inode, struct file *filp); |
| 1139 | 1144 | ||
| 1140 | /* Mapping support (drm_vm.h) */ | 1145 | /* Mapping support (drm_vm.h) */ |
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index b69347b8904f..219f075d2733 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h | |||
| @@ -123,7 +123,7 @@ struct drm_display_mode { | |||
| 123 | int type; | 123 | int type; |
| 124 | 124 | ||
| 125 | /* Proposed mode values */ | 125 | /* Proposed mode values */ |
| 126 | int clock; | 126 | int clock; /* in kHz */ |
| 127 | int hdisplay; | 127 | int hdisplay; |
| 128 | int hsync_start; | 128 | int hsync_start; |
| 129 | int hsync_end; | 129 | int hsync_end; |
| @@ -164,8 +164,8 @@ struct drm_display_mode { | |||
| 164 | int *private; | 164 | int *private; |
| 165 | int private_flags; | 165 | int private_flags; |
| 166 | 166 | ||
| 167 | int vrefresh; | 167 | int vrefresh; /* in Hz */ |
| 168 | float hsync; | 168 | int hsync; /* in kHz */ |
| 169 | }; | 169 | }; |
| 170 | 170 | ||
| 171 | enum drm_connector_status { | 171 | enum drm_connector_status { |
| @@ -242,6 +242,21 @@ struct drm_framebuffer_funcs { | |||
| 242 | int (*create_handle)(struct drm_framebuffer *fb, | 242 | int (*create_handle)(struct drm_framebuffer *fb, |
| 243 | struct drm_file *file_priv, | 243 | struct drm_file *file_priv, |
| 244 | unsigned int *handle); | 244 | unsigned int *handle); |
| 245 | /** | ||
| 246 | * Optinal callback for the dirty fb ioctl. | ||
| 247 | * | ||
| 248 | * Userspace can notify the driver via this callback | ||
| 249 | * that a area of the framebuffer has changed and should | ||
| 250 | * be flushed to the display hardware. | ||
| 251 | * | ||
| 252 | * See documentation in drm_mode.h for the struct | ||
| 253 | * drm_mode_fb_dirty_cmd for more information as all | ||
| 254 | * the semantics and arguments have a one to one mapping | ||
| 255 | * on this function. | ||
| 256 | */ | ||
| 257 | int (*dirty)(struct drm_framebuffer *framebuffer, unsigned flags, | ||
| 258 | unsigned color, struct drm_clip_rect *clips, | ||
| 259 | unsigned num_clips); | ||
| 245 | }; | 260 | }; |
| 246 | 261 | ||
| 247 | struct drm_framebuffer { | 262 | struct drm_framebuffer { |
| @@ -256,7 +271,7 @@ struct drm_framebuffer { | |||
| 256 | unsigned int depth; | 271 | unsigned int depth; |
| 257 | int bits_per_pixel; | 272 | int bits_per_pixel; |
| 258 | int flags; | 273 | int flags; |
| 259 | void *fbdev; | 274 | struct fb_info *fbdev; |
| 260 | u32 pseudo_palette[17]; | 275 | u32 pseudo_palette[17]; |
| 261 | struct list_head filp_head; | 276 | struct list_head filp_head; |
| 262 | /* if you are using the helper */ | 277 | /* if you are using the helper */ |
| @@ -290,6 +305,7 @@ struct drm_property { | |||
| 290 | struct drm_crtc; | 305 | struct drm_crtc; |
| 291 | struct drm_connector; | 306 | struct drm_connector; |
| 292 | struct drm_encoder; | 307 | struct drm_encoder; |
| 308 | struct drm_pending_vblank_event; | ||
| 293 | 309 | ||
| 294 | /** | 310 | /** |
| 295 | * drm_crtc_funcs - control CRTCs for a given device | 311 | * drm_crtc_funcs - control CRTCs for a given device |
| @@ -333,6 +349,19 @@ struct drm_crtc_funcs { | |||
| 333 | void (*destroy)(struct drm_crtc *crtc); | 349 | void (*destroy)(struct drm_crtc *crtc); |
| 334 | 350 | ||
| 335 | int (*set_config)(struct drm_mode_set *set); | 351 | int (*set_config)(struct drm_mode_set *set); |
| 352 | |||
| 353 | /* | ||
| 354 | * Flip to the given framebuffer. This implements the page | ||
| 355 | * flip ioctl descibed in drm_mode.h, specifically, the | ||
| 356 | * implementation must return immediately and block all | ||
| 357 | * rendering to the current fb until the flip has completed. | ||
| 358 | * If userspace set the event flag in the ioctl, the event | ||
| 359 | * argument will point to an event to send back when the flip | ||
| 360 | * completes, otherwise it will be NULL. | ||
| 361 | */ | ||
| 362 | int (*page_flip)(struct drm_crtc *crtc, | ||
| 363 | struct drm_framebuffer *fb, | ||
| 364 | struct drm_pending_vblank_event *event); | ||
| 336 | }; | 365 | }; |
| 337 | 366 | ||
| 338 | /** | 367 | /** |
| @@ -596,6 +625,7 @@ struct drm_mode_config { | |||
| 596 | /* Optional properties */ | 625 | /* Optional properties */ |
| 597 | struct drm_property *scaling_mode_property; | 626 | struct drm_property *scaling_mode_property; |
| 598 | struct drm_property *dithering_mode_property; | 627 | struct drm_property *dithering_mode_property; |
| 628 | struct drm_property *dirty_info_property; | ||
| 599 | }; | 629 | }; |
| 600 | 630 | ||
| 601 | #define obj_to_crtc(x) container_of(x, struct drm_crtc, base) | 631 | #define obj_to_crtc(x) container_of(x, struct drm_crtc, base) |
| @@ -667,6 +697,7 @@ extern void drm_mode_validate_size(struct drm_device *dev, | |||
| 667 | extern void drm_mode_prune_invalid(struct drm_device *dev, | 697 | extern void drm_mode_prune_invalid(struct drm_device *dev, |
| 668 | struct list_head *mode_list, bool verbose); | 698 | struct list_head *mode_list, bool verbose); |
| 669 | extern void drm_mode_sort(struct list_head *mode_list); | 699 | extern void drm_mode_sort(struct list_head *mode_list); |
| 700 | extern int drm_mode_hsync(struct drm_display_mode *mode); | ||
| 670 | extern int drm_mode_vrefresh(struct drm_display_mode *mode); | 701 | extern int drm_mode_vrefresh(struct drm_display_mode *mode); |
| 671 | extern void drm_mode_set_crtcinfo(struct drm_display_mode *p, | 702 | extern void drm_mode_set_crtcinfo(struct drm_display_mode *p, |
| 672 | int adjust_flags); | 703 | int adjust_flags); |
| @@ -703,6 +734,7 @@ extern int drm_mode_create_tv_properties(struct drm_device *dev, int num_formats | |||
| 703 | char *formats[]); | 734 | char *formats[]); |
| 704 | extern int drm_mode_create_scaling_mode_property(struct drm_device *dev); | 735 | extern int drm_mode_create_scaling_mode_property(struct drm_device *dev); |
| 705 | extern int drm_mode_create_dithering_property(struct drm_device *dev); | 736 | extern int drm_mode_create_dithering_property(struct drm_device *dev); |
| 737 | extern int drm_mode_create_dirty_info_property(struct drm_device *dev); | ||
| 706 | extern char *drm_get_encoder_name(struct drm_encoder *encoder); | 738 | extern char *drm_get_encoder_name(struct drm_encoder *encoder); |
| 707 | 739 | ||
| 708 | extern int drm_mode_connector_attach_encoder(struct drm_connector *connector, | 740 | extern int drm_mode_connector_attach_encoder(struct drm_connector *connector, |
| @@ -730,6 +762,8 @@ extern int drm_mode_rmfb(struct drm_device *dev, | |||
| 730 | void *data, struct drm_file *file_priv); | 762 | void *data, struct drm_file *file_priv); |
| 731 | extern int drm_mode_getfb(struct drm_device *dev, | 763 | extern int drm_mode_getfb(struct drm_device *dev, |
| 732 | void *data, struct drm_file *file_priv); | 764 | void *data, struct drm_file *file_priv); |
| 765 | extern int drm_mode_dirtyfb_ioctl(struct drm_device *dev, | ||
| 766 | void *data, struct drm_file *file_priv); | ||
| 733 | extern int drm_mode_addmode_ioctl(struct drm_device *dev, | 767 | extern int drm_mode_addmode_ioctl(struct drm_device *dev, |
| 734 | void *data, struct drm_file *file_priv); | 768 | void *data, struct drm_file *file_priv); |
| 735 | extern int drm_mode_rmmode_ioctl(struct drm_device *dev, | 769 | extern int drm_mode_rmmode_ioctl(struct drm_device *dev, |
| @@ -756,6 +790,8 @@ extern int drm_mode_gamma_get_ioctl(struct drm_device *dev, | |||
| 756 | extern int drm_mode_gamma_set_ioctl(struct drm_device *dev, | 790 | extern int drm_mode_gamma_set_ioctl(struct drm_device *dev, |
| 757 | void *data, struct drm_file *file_priv); | 791 | void *data, struct drm_file *file_priv); |
| 758 | extern bool drm_detect_hdmi_monitor(struct edid *edid); | 792 | extern bool drm_detect_hdmi_monitor(struct edid *edid); |
| 793 | extern int drm_mode_page_flip_ioctl(struct drm_device *dev, | ||
| 794 | void *data, struct drm_file *file_priv); | ||
| 759 | extern struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, | 795 | extern struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, |
| 760 | int hdisplay, int vdisplay, int vrefresh, | 796 | int hdisplay, int vdisplay, int vrefresh, |
| 761 | bool reduced, bool interlaced, bool margins); | 797 | bool reduced, bool interlaced, bool margins); |
diff --git a/drivers/gpu/drm/i915/intel_dp.h b/include/drm/drm_dp_helper.h index 2b38054d3b6d..e49879ce95f9 100644 --- a/drivers/gpu/drm/i915/intel_dp.h +++ b/include/drm/drm_dp_helper.h | |||
| @@ -20,8 +20,8 @@ | |||
| 20 | * OF THIS SOFTWARE. | 20 | * OF THIS SOFTWARE. |
| 21 | */ | 21 | */ |
| 22 | 22 | ||
| 23 | #ifndef _INTEL_DP_H_ | 23 | #ifndef _DRM_DP_HELPER_H_ |
| 24 | #define _INTEL_DP_H_ | 24 | #define _DRM_DP_HELPER_H_ |
| 25 | 25 | ||
| 26 | /* From the VESA DisplayPort spec */ | 26 | /* From the VESA DisplayPort spec */ |
| 27 | 27 | ||
| @@ -130,15 +130,20 @@ | |||
| 130 | #define DP_ADJUST_PRE_EMPHASIS_LANE1_MASK 0xc0 | 130 | #define DP_ADJUST_PRE_EMPHASIS_LANE1_MASK 0xc0 |
| 131 | #define DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT 6 | 131 | #define DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT 6 |
| 132 | 132 | ||
| 133 | #define MODE_I2C_START 1 | ||
| 134 | #define MODE_I2C_WRITE 2 | ||
| 135 | #define MODE_I2C_READ 4 | ||
| 136 | #define MODE_I2C_STOP 8 | ||
| 137 | |||
| 133 | struct i2c_algo_dp_aux_data { | 138 | struct i2c_algo_dp_aux_data { |
| 134 | bool running; | 139 | bool running; |
| 135 | u16 address; | 140 | u16 address; |
| 136 | int (*aux_ch) (struct i2c_adapter *adapter, | 141 | int (*aux_ch) (struct i2c_adapter *adapter, |
| 137 | uint8_t *send, int send_bytes, | 142 | int mode, uint8_t write_byte, |
| 138 | uint8_t *recv, int recv_bytes); | 143 | uint8_t *read_byte); |
| 139 | }; | 144 | }; |
| 140 | 145 | ||
| 141 | int | 146 | int |
| 142 | i2c_dp_aux_add_bus(struct i2c_adapter *adapter); | 147 | i2c_dp_aux_add_bus(struct i2c_adapter *adapter); |
| 143 | 148 | ||
| 144 | #endif /* _INTEL_DP_H_ */ | 149 | #endif /* _DRM_DP_HELPER_H_ */ |
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h index 7d6c9a2dfcbb..d33c3e038606 100644 --- a/include/drm/drm_edid.h +++ b/include/drm/drm_edid.h | |||
| @@ -106,6 +106,10 @@ struct detailed_data_color_point { | |||
| 106 | u8 wpindex2[3]; | 106 | u8 wpindex2[3]; |
| 107 | } __attribute__((packed)); | 107 | } __attribute__((packed)); |
| 108 | 108 | ||
| 109 | struct cvt_timing { | ||
| 110 | u8 code[3]; | ||
| 111 | } __attribute__((packed)); | ||
| 112 | |||
| 109 | struct detailed_non_pixel { | 113 | struct detailed_non_pixel { |
| 110 | u8 pad1; | 114 | u8 pad1; |
| 111 | u8 type; /* ff=serial, fe=string, fd=monitor range, fc=monitor name | 115 | u8 type; /* ff=serial, fe=string, fd=monitor range, fc=monitor name |
| @@ -117,9 +121,13 @@ struct detailed_non_pixel { | |||
| 117 | struct detailed_data_monitor_range range; | 121 | struct detailed_data_monitor_range range; |
| 118 | struct detailed_data_wpindex color; | 122 | struct detailed_data_wpindex color; |
| 119 | struct std_timing timings[5]; | 123 | struct std_timing timings[5]; |
| 124 | struct cvt_timing cvt[4]; | ||
| 120 | } data; | 125 | } data; |
| 121 | } __attribute__((packed)); | 126 | } __attribute__((packed)); |
| 122 | 127 | ||
| 128 | #define EDID_DETAIL_EST_TIMINGS 0xf7 | ||
| 129 | #define EDID_DETAIL_CVT_3BYTE 0xf8 | ||
| 130 | #define EDID_DETAIL_COLOR_MGMT_DATA 0xf9 | ||
| 123 | #define EDID_DETAIL_STD_MODES 0xfa | 131 | #define EDID_DETAIL_STD_MODES 0xfa |
| 124 | #define EDID_DETAIL_MONITOR_CPDATA 0xfb | 132 | #define EDID_DETAIL_MONITOR_CPDATA 0xfb |
| 125 | #define EDID_DETAIL_MONITOR_NAME 0xfc | 133 | #define EDID_DETAIL_MONITOR_NAME 0xfc |
diff --git a/include/drm/drm_mode.h b/include/drm/drm_mode.h index 1f908416aedb..43009bc2e757 100644 --- a/include/drm/drm_mode.h +++ b/include/drm/drm_mode.h | |||
| @@ -27,9 +27,6 @@ | |||
| 27 | #ifndef _DRM_MODE_H | 27 | #ifndef _DRM_MODE_H |
| 28 | #define _DRM_MODE_H | 28 | #define _DRM_MODE_H |
| 29 | 29 | ||
| 30 | #include <linux/kernel.h> | ||
| 31 | #include <linux/types.h> | ||
| 32 | |||
| 33 | #define DRM_DISPLAY_INFO_LEN 32 | 30 | #define DRM_DISPLAY_INFO_LEN 32 |
| 34 | #define DRM_CONNECTOR_NAME_LEN 32 | 31 | #define DRM_CONNECTOR_NAME_LEN 32 |
| 35 | #define DRM_DISPLAY_MODE_LEN 32 | 32 | #define DRM_DISPLAY_MODE_LEN 32 |
| @@ -78,6 +75,11 @@ | |||
| 78 | #define DRM_MODE_DITHERING_OFF 0 | 75 | #define DRM_MODE_DITHERING_OFF 0 |
| 79 | #define DRM_MODE_DITHERING_ON 1 | 76 | #define DRM_MODE_DITHERING_ON 1 |
| 80 | 77 | ||
| 78 | /* Dirty info options */ | ||
| 79 | #define DRM_MODE_DIRTY_OFF 0 | ||
| 80 | #define DRM_MODE_DIRTY_ON 1 | ||
| 81 | #define DRM_MODE_DIRTY_ANNOTATE 2 | ||
| 82 | |||
| 81 | struct drm_mode_modeinfo { | 83 | struct drm_mode_modeinfo { |
| 82 | __u32 clock; | 84 | __u32 clock; |
| 83 | __u16 hdisplay, hsync_start, hsync_end, htotal, hskew; | 85 | __u16 hdisplay, hsync_start, hsync_end, htotal, hskew; |
| @@ -225,6 +227,45 @@ struct drm_mode_fb_cmd { | |||
| 225 | __u32 handle; | 227 | __u32 handle; |
| 226 | }; | 228 | }; |
| 227 | 229 | ||
| 230 | #define DRM_MODE_FB_DIRTY_ANNOTATE_COPY 0x01 | ||
| 231 | #define DRM_MODE_FB_DIRTY_ANNOTATE_FILL 0x02 | ||
| 232 | #define DRM_MODE_FB_DIRTY_FLAGS 0x03 | ||
| 233 | |||
| 234 | /* | ||
| 235 | * Mark a region of a framebuffer as dirty. | ||
| 236 | * | ||
| 237 | * Some hardware does not automatically update display contents | ||
| 238 | * as a hardware or software draw to a framebuffer. This ioctl | ||
| 239 | * allows userspace to tell the kernel and the hardware what | ||
| 240 | * regions of the framebuffer have changed. | ||
| 241 | * | ||
| 242 | * The kernel or hardware is free to update more than just the | ||
| 243 | * region specified by the clip rects. The kernel or hardware | ||
| 244 | * may also delay and/or coalesce several calls to dirty into a | ||
| 245 | * single update. | ||
| 246 | * | ||
| 247 | * Userspace may annotate the updates, the annotates are a | ||
| 248 | * promise made by the caller that the change is either a copy | ||
| 249 | * of pixels or a fill of a single color in the region specified. | ||
| 250 | * | ||
| 251 | * If the DRM_MODE_FB_DIRTY_ANNOTATE_COPY flag is given then | ||
| 252 | * the number of updated regions are half of num_clips given, | ||
| 253 | * where the clip rects are paired in src and dst. The width and | ||
| 254 | * height of each one of the pairs must match. | ||
| 255 | * | ||
| 256 | * If the DRM_MODE_FB_DIRTY_ANNOTATE_FILL flag is given the caller | ||
| 257 | * promises that the region specified of the clip rects is filled | ||
| 258 | * completely with a single color as given in the color argument. | ||
| 259 | */ | ||
| 260 | |||
| 261 | struct drm_mode_fb_dirty_cmd { | ||
| 262 | __u32 fb_id; | ||
| 263 | __u32 flags; | ||
| 264 | __u32 color; | ||
| 265 | __u32 num_clips; | ||
| 266 | __u64 clips_ptr; | ||
| 267 | }; | ||
| 268 | |||
| 228 | struct drm_mode_mode_cmd { | 269 | struct drm_mode_mode_cmd { |
| 229 | __u32 connector_id; | 270 | __u32 connector_id; |
| 230 | struct drm_mode_modeinfo mode; | 271 | struct drm_mode_modeinfo mode; |
| @@ -268,4 +309,37 @@ struct drm_mode_crtc_lut { | |||
| 268 | __u64 blue; | 309 | __u64 blue; |
| 269 | }; | 310 | }; |
| 270 | 311 | ||
| 312 | #define DRM_MODE_PAGE_FLIP_EVENT 0x01 | ||
| 313 | #define DRM_MODE_PAGE_FLIP_FLAGS DRM_MODE_PAGE_FLIP_EVENT | ||
| 314 | |||
| 315 | /* | ||
| 316 | * Request a page flip on the specified crtc. | ||
| 317 | * | ||
| 318 | * This ioctl will ask KMS to schedule a page flip for the specified | ||
| 319 | * crtc. Once any pending rendering targeting the specified fb (as of | ||
| 320 | * ioctl time) has completed, the crtc will be reprogrammed to display | ||
| 321 | * that fb after the next vertical refresh. The ioctl returns | ||
| 322 | * immediately, but subsequent rendering to the current fb will block | ||
| 323 | * in the execbuffer ioctl until the page flip happens. If a page | ||
| 324 | * flip is already pending as the ioctl is called, EBUSY will be | ||
| 325 | * returned. | ||
| 326 | * | ||
| 327 | * The ioctl supports one flag, DRM_MODE_PAGE_FLIP_EVENT, which will | ||
| 328 | * request that drm sends back a vblank event (see drm.h: struct | ||
| 329 | * drm_event_vblank) when the page flip is done. The user_data field | ||
| 330 | * passed in with this ioctl will be returned as the user_data field | ||
| 331 | * in the vblank event struct. | ||
| 332 | * | ||
| 333 | * The reserved field must be zero until we figure out something | ||
| 334 | * clever to use it for. | ||
| 335 | */ | ||
| 336 | |||
| 337 | struct drm_mode_crtc_page_flip { | ||
| 338 | __u32 crtc_id; | ||
| 339 | __u32 fb_id; | ||
| 340 | __u32 flags; | ||
| 341 | __u32 reserved; | ||
| 342 | __u64 user_data; | ||
| 343 | }; | ||
| 344 | |||
| 271 | #endif | 345 | #endif |
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h index 7e0cb1da92e6..a04c3ab1d726 100644 --- a/include/drm/i915_drm.h +++ b/include/drm/i915_drm.h | |||
| @@ -27,11 +27,11 @@ | |||
| 27 | #ifndef _I915_DRM_H_ | 27 | #ifndef _I915_DRM_H_ |
| 28 | #define _I915_DRM_H_ | 28 | #define _I915_DRM_H_ |
| 29 | 29 | ||
| 30 | #include "drm.h" | ||
| 31 | |||
| 30 | /* Please note that modifications to all structs defined here are | 32 | /* Please note that modifications to all structs defined here are |
| 31 | * subject to backwards-compatibility constraints. | 33 | * subject to backwards-compatibility constraints. |
| 32 | */ | 34 | */ |
| 33 | #include <linux/types.h> | ||
| 34 | #include "drm.h" | ||
| 35 | 35 | ||
| 36 | /* Each region is a minimum of 16k, and there are at most 255 of them. | 36 | /* Each region is a minimum of 16k, and there are at most 255 of them. |
| 37 | */ | 37 | */ |
diff --git a/include/drm/mga_drm.h b/include/drm/mga_drm.h index 325fd6fb4a42..3ffbc4798afa 100644 --- a/include/drm/mga_drm.h +++ b/include/drm/mga_drm.h | |||
| @@ -35,7 +35,7 @@ | |||
| 35 | #ifndef __MGA_DRM_H__ | 35 | #ifndef __MGA_DRM_H__ |
| 36 | #define __MGA_DRM_H__ | 36 | #define __MGA_DRM_H__ |
| 37 | 37 | ||
| 38 | #include <linux/types.h> | 38 | #include "drm.h" |
| 39 | 39 | ||
| 40 | /* WARNING: If you change any of these defines, make sure to change the | 40 | /* WARNING: If you change any of these defines, make sure to change the |
| 41 | * defines in the Xserver file (mga_sarea.h) | 41 | * defines in the Xserver file (mga_sarea.h) |
diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h index 3b9932ab1756..39537f3cf98a 100644 --- a/include/drm/radeon_drm.h +++ b/include/drm/radeon_drm.h | |||
| @@ -33,7 +33,7 @@ | |||
| 33 | #ifndef __RADEON_DRM_H__ | 33 | #ifndef __RADEON_DRM_H__ |
| 34 | #define __RADEON_DRM_H__ | 34 | #define __RADEON_DRM_H__ |
| 35 | 35 | ||
| 36 | #include <linux/types.h> | 36 | #include "drm.h" |
| 37 | 37 | ||
| 38 | /* WARNING: If you change any of these defines, make sure to change the | 38 | /* WARNING: If you change any of these defines, make sure to change the |
| 39 | * defines in the X server file (radeon_sarea.h) | 39 | * defines in the X server file (radeon_sarea.h) |
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index e8cd6d20aed2..7a39ab9aa1d1 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h | |||
| @@ -545,6 +545,15 @@ extern int ttm_tt_set_user(struct ttm_tt *ttm, | |||
| 545 | extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem); | 545 | extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem); |
| 546 | 546 | ||
| 547 | /** | 547 | /** |
| 548 | * ttm_tt_populate: | ||
| 549 | * | ||
| 550 | * @ttm: The struct ttm_tt to contain the backing pages. | ||
| 551 | * | ||
| 552 | * Add backing pages to all of @ttm | ||
| 553 | */ | ||
| 554 | extern int ttm_tt_populate(struct ttm_tt *ttm); | ||
| 555 | |||
| 556 | /** | ||
| 548 | * ttm_ttm_destroy: | 557 | * ttm_ttm_destroy: |
| 549 | * | 558 | * |
| 550 | * @ttm: The struct ttm_tt. | 559 | * @ttm: The struct ttm_tt. |
diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h new file mode 100644 index 000000000000..cd2c475da9ea --- /dev/null +++ b/include/drm/ttm/ttm_execbuf_util.h | |||
| @@ -0,0 +1,107 @@ | |||
| 1 | /************************************************************************** | ||
| 2 | * | ||
| 3 | * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA | ||
| 4 | * All Rights Reserved. | ||
| 5 | * | ||
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 7 | * copy of this software and associated documentation files (the | ||
| 8 | * "Software"), to deal in the Software without restriction, including | ||
| 9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
| 10 | * distribute, sub license, and/or sell copies of the Software, and to | ||
| 11 | * permit persons to whom the Software is furnished to do so, subject to | ||
| 12 | * the following conditions: | ||
| 13 | * | ||
| 14 | * The above copyright notice and this permission notice (including the | ||
| 15 | * next paragraph) shall be included in all copies or substantial portions | ||
| 16 | * of the Software. | ||
| 17 | * | ||
| 18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
| 21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
| 22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
| 23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
| 24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
| 25 | * | ||
| 26 | **************************************************************************/ | ||
| 27 | /* | ||
| 28 | * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> | ||
| 29 | */ | ||
| 30 | |||
| 31 | #ifndef _TTM_EXECBUF_UTIL_H_ | ||
| 32 | #define _TTM_EXECBUF_UTIL_H_ | ||
| 33 | |||
| 34 | #include "ttm/ttm_bo_api.h" | ||
| 35 | #include <linux/list.h> | ||
| 36 | |||
| 37 | /** | ||
| 38 | * struct ttm_validate_buffer | ||
| 39 | * | ||
| 40 | * @head: list head for thread-private list. | ||
| 41 | * @bo: refcounted buffer object pointer. | ||
| 42 | * @new_sync_obj_arg: New sync_obj_arg for @bo, to be used once | ||
| 43 | * adding a new sync object. | ||
| 44 | * @reserved: Indicates whether @bo has been reserved for validation. | ||
| 45 | */ | ||
| 46 | |||
| 47 | struct ttm_validate_buffer { | ||
| 48 | struct list_head head; | ||
| 49 | struct ttm_buffer_object *bo; | ||
| 50 | void *new_sync_obj_arg; | ||
| 51 | bool reserved; | ||
| 52 | }; | ||
| 53 | |||
| 54 | /** | ||
| 55 | * function ttm_eu_backoff_reservation | ||
| 56 | * | ||
| 57 | * @list: thread private list of ttm_validate_buffer structs. | ||
| 58 | * | ||
| 59 | * Undoes all buffer validation reservations for bos pointed to by | ||
| 60 | * the list entries. | ||
| 61 | */ | ||
| 62 | |||
| 63 | extern void ttm_eu_backoff_reservation(struct list_head *list); | ||
| 64 | |||
| 65 | /** | ||
| 66 | * function ttm_eu_reserve_buffers | ||
| 67 | * | ||
| 68 | * @list: thread private list of ttm_validate_buffer structs. | ||
| 69 | * @val_seq: A unique sequence number. | ||
| 70 | * | ||
| 71 | * Tries to reserve bos pointed to by the list entries for validation. | ||
| 72 | * If the function returns 0, all buffers are marked as "unfenced", | ||
| 73 | * taken off the lru lists and are not synced for write CPU usage. | ||
| 74 | * | ||
| 75 | * If the function detects a deadlock due to multiple threads trying to | ||
| 76 | * reserve the same buffers in reverse order, all threads except one will | ||
| 77 | * back off and retry. This function may sleep while waiting for | ||
| 78 | * CPU write reservations to be cleared, and for other threads to | ||
| 79 | * unreserve their buffers. | ||
| 80 | * | ||
| 81 | * This function may return -ERESTART or -EAGAIN if the calling process | ||
| 82 | * receives a signal while waiting. In that case, no buffers on the list | ||
| 83 | * will be reserved upon return. | ||
| 84 | * | ||
| 85 | * Buffers reserved by this function should be unreserved by | ||
| 86 | * a call to either ttm_eu_backoff_reservation() or | ||
| 87 | * ttm_eu_fence_buffer_objects() when command submission is complete or | ||
| 88 | * has failed. | ||
| 89 | */ | ||
| 90 | |||
| 91 | extern int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq); | ||
| 92 | |||
| 93 | /** | ||
| 94 | * function ttm_eu_fence_buffer_objects. | ||
| 95 | * | ||
| 96 | * @list: thread private list of ttm_validate_buffer structs. | ||
| 97 | * @sync_obj: The new sync object for the buffers. | ||
| 98 | * | ||
| 99 | * This function should be called when command submission is complete, and | ||
| 100 | * it will add a new sync object to bos pointed to by entries on @list. | ||
| 101 | * It also unreserves all buffers, putting them on lru lists. | ||
| 102 | * | ||
| 103 | */ | ||
| 104 | |||
| 105 | extern void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj); | ||
| 106 | |||
| 107 | #endif | ||
diff --git a/include/drm/ttm/ttm_lock.h b/include/drm/ttm/ttm_lock.h new file mode 100644 index 000000000000..81ba0b0b891a --- /dev/null +++ b/include/drm/ttm/ttm_lock.h | |||
| @@ -0,0 +1,247 @@ | |||
| 1 | /************************************************************************** | ||
| 2 | * | ||
| 3 | * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA | ||
| 4 | * All Rights Reserved. | ||
| 5 | * | ||
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 7 | * copy of this software and associated documentation files (the | ||
| 8 | * "Software"), to deal in the Software without restriction, including | ||
| 9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
| 10 | * distribute, sub license, and/or sell copies of the Software, and to | ||
| 11 | * permit persons to whom the Software is furnished to do so, subject to | ||
| 12 | * the following conditions: | ||
| 13 | * | ||
| 14 | * The above copyright notice and this permission notice (including the | ||
| 15 | * next paragraph) shall be included in all copies or substantial portions | ||
| 16 | * of the Software. | ||
| 17 | * | ||
| 18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
| 21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
| 22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
| 23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
| 24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
| 25 | * | ||
| 26 | **************************************************************************/ | ||
| 27 | /* | ||
| 28 | * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> | ||
| 29 | */ | ||
| 30 | |||
| 31 | /** @file ttm_lock.h | ||
| 32 | * This file implements a simple replacement for the buffer manager use | ||
| 33 | * of the DRM heavyweight hardware lock. | ||
| 34 | * The lock is a read-write lock. Taking it in read mode and write mode | ||
| 35 | * is relatively fast, and intended for in-kernel use only. | ||
| 36 | * | ||
| 37 | * The vt mode is used only when there is a need to block all | ||
| 38 | * user-space processes from validating buffers. | ||
| 39 | * It's allowed to leave kernel space with the vt lock held. | ||
| 40 | * If a user-space process dies while having the vt-lock, | ||
| 41 | * it will be released during the file descriptor release. The vt lock | ||
| 42 | * excludes write lock and read lock. | ||
| 43 | * | ||
| 44 | * The suspend mode is used to lock out all TTM users when preparing for | ||
| 45 | * and executing suspend operations. | ||
| 46 | * | ||
| 47 | */ | ||
| 48 | |||
| 49 | #ifndef _TTM_LOCK_H_ | ||
| 50 | #define _TTM_LOCK_H_ | ||
| 51 | |||
| 52 | #include "ttm/ttm_object.h" | ||
| 53 | #include <linux/wait.h> | ||
| 54 | #include <asm/atomic.h> | ||
| 55 | |||
| 56 | /** | ||
| 57 | * struct ttm_lock | ||
| 58 | * | ||
| 59 | * @base: ttm base object used solely to release the lock if the client | ||
| 60 | * holding the lock dies. | ||
| 61 | * @queue: Queue for processes waiting for lock change-of-status. | ||
| 62 | * @lock: Spinlock protecting some lock members. | ||
| 63 | * @rw: Read-write lock counter. Protected by @lock. | ||
| 64 | * @flags: Lock state. Protected by @lock. | ||
| 65 | * @kill_takers: Boolean whether to kill takers of the lock. | ||
| 66 | * @signal: Signal to send when kill_takers is true. | ||
| 67 | */ | ||
| 68 | |||
| 69 | struct ttm_lock { | ||
| 70 | struct ttm_base_object base; | ||
| 71 | wait_queue_head_t queue; | ||
| 72 | spinlock_t lock; | ||
| 73 | int32_t rw; | ||
| 74 | uint32_t flags; | ||
| 75 | bool kill_takers; | ||
| 76 | int signal; | ||
| 77 | struct ttm_object_file *vt_holder; | ||
| 78 | }; | ||
| 79 | |||
| 80 | |||
| 81 | /** | ||
| 82 | * ttm_lock_init | ||
| 83 | * | ||
| 84 | * @lock: Pointer to a struct ttm_lock | ||
| 85 | * Initializes the lock. | ||
| 86 | */ | ||
| 87 | extern void ttm_lock_init(struct ttm_lock *lock); | ||
| 88 | |||
| 89 | /** | ||
| 90 | * ttm_read_unlock | ||
| 91 | * | ||
| 92 | * @lock: Pointer to a struct ttm_lock | ||
| 93 | * | ||
| 94 | * Releases a read lock. | ||
| 95 | */ | ||
| 96 | extern void ttm_read_unlock(struct ttm_lock *lock); | ||
| 97 | |||
| 98 | /** | ||
| 99 | * ttm_read_lock | ||
| 100 | * | ||
| 101 | * @lock: Pointer to a struct ttm_lock | ||
| 102 | * @interruptible: Interruptible sleeping while waiting for a lock. | ||
| 103 | * | ||
| 104 | * Takes the lock in read mode. | ||
| 105 | * Returns: | ||
| 106 | * -ERESTARTSYS If interrupted by a signal and interruptible is true. | ||
| 107 | */ | ||
| 108 | extern int ttm_read_lock(struct ttm_lock *lock, bool interruptible); | ||
| 109 | |||
| 110 | /** | ||
| 111 | * ttm_read_trylock | ||
| 112 | * | ||
| 113 | * @lock: Pointer to a struct ttm_lock | ||
| 114 | * @interruptible: Interruptible sleeping while waiting for a lock. | ||
| 115 | * | ||
| 116 | * Tries to take the lock in read mode. If the lock is already held | ||
| 117 | * in write mode, the function will return -EBUSY. If the lock is held | ||
| 118 | * in vt or suspend mode, the function will sleep until these modes | ||
| 119 | * are unlocked. | ||
| 120 | * | ||
| 121 | * Returns: | ||
| 122 | * -EBUSY The lock was already held in write mode. | ||
| 123 | * -ERESTARTSYS If interrupted by a signal and interruptible is true. | ||
| 124 | */ | ||
| 125 | extern int ttm_read_trylock(struct ttm_lock *lock, bool interruptible); | ||
| 126 | |||
| 127 | /** | ||
| 128 | * ttm_write_unlock | ||
| 129 | * | ||
| 130 | * @lock: Pointer to a struct ttm_lock | ||
| 131 | * | ||
| 132 | * Releases a write lock. | ||
| 133 | */ | ||
| 134 | extern void ttm_write_unlock(struct ttm_lock *lock); | ||
| 135 | |||
| 136 | /** | ||
| 137 | * ttm_write_lock | ||
| 138 | * | ||
| 139 | * @lock: Pointer to a struct ttm_lock | ||
| 140 | * @interruptible: Interruptible sleeping while waiting for a lock. | ||
| 141 | * | ||
| 142 | * Takes the lock in write mode. | ||
| 143 | * Returns: | ||
| 144 | * -ERESTARTSYS If interrupted by a signal and interruptible is true. | ||
| 145 | */ | ||
| 146 | extern int ttm_write_lock(struct ttm_lock *lock, bool interruptible); | ||
| 147 | |||
| 148 | /** | ||
| 149 | * ttm_lock_downgrade | ||
| 150 | * | ||
| 151 | * @lock: Pointer to a struct ttm_lock | ||
| 152 | * | ||
| 153 | * Downgrades a write lock to a read lock. | ||
| 154 | */ | ||
| 155 | extern void ttm_lock_downgrade(struct ttm_lock *lock); | ||
| 156 | |||
| 157 | /** | ||
| 158 | * ttm_suspend_lock | ||
| 159 | * | ||
| 160 | * @lock: Pointer to a struct ttm_lock | ||
| 161 | * | ||
| 162 | * Takes the lock in suspend mode. Excludes read and write mode. | ||
| 163 | */ | ||
| 164 | extern void ttm_suspend_lock(struct ttm_lock *lock); | ||
| 165 | |||
| 166 | /** | ||
| 167 | * ttm_suspend_unlock | ||
| 168 | * | ||
| 169 | * @lock: Pointer to a struct ttm_lock | ||
| 170 | * | ||
| 171 | * Releases a suspend lock | ||
| 172 | */ | ||
| 173 | extern void ttm_suspend_unlock(struct ttm_lock *lock); | ||
| 174 | |||
| 175 | /** | ||
| 176 | * ttm_vt_lock | ||
| 177 | * | ||
| 178 | * @lock: Pointer to a struct ttm_lock | ||
| 179 | * @interruptible: Interruptible sleeping while waiting for a lock. | ||
| 180 | * @tfile: Pointer to a struct ttm_object_file to register the lock with. | ||
| 181 | * | ||
| 182 | * Takes the lock in vt mode. | ||
| 183 | * Returns: | ||
| 184 | * -ERESTARTSYS If interrupted by a signal and interruptible is true. | ||
| 185 | * -ENOMEM: Out of memory when locking. | ||
| 186 | */ | ||
| 187 | extern int ttm_vt_lock(struct ttm_lock *lock, bool interruptible, | ||
| 188 | struct ttm_object_file *tfile); | ||
| 189 | |||
| 190 | /** | ||
| 191 | * ttm_vt_unlock | ||
| 192 | * | ||
| 193 | * @lock: Pointer to a struct ttm_lock | ||
| 194 | * | ||
| 195 | * Releases a vt lock. | ||
| 196 | * Returns: | ||
| 197 | * -EINVAL If the lock was not held. | ||
| 198 | */ | ||
| 199 | extern int ttm_vt_unlock(struct ttm_lock *lock); | ||
| 200 | |||
| 201 | /** | ||
| 202 | * ttm_write_unlock | ||
| 203 | * | ||
| 204 | * @lock: Pointer to a struct ttm_lock | ||
| 205 | * | ||
| 206 | * Releases a write lock. | ||
| 207 | */ | ||
| 208 | extern void ttm_write_unlock(struct ttm_lock *lock); | ||
| 209 | |||
| 210 | /** | ||
| 211 | * ttm_write_lock | ||
| 212 | * | ||
| 213 | * @lock: Pointer to a struct ttm_lock | ||
| 214 | * @interruptible: Interruptible sleeping while waiting for a lock. | ||
| 215 | * | ||
| 216 | * Takes the lock in write mode. | ||
| 217 | * Returns: | ||
| 218 | * -ERESTARTSYS If interrupted by a signal and interruptible is true. | ||
| 219 | */ | ||
| 220 | extern int ttm_write_lock(struct ttm_lock *lock, bool interruptible); | ||
| 221 | |||
| 222 | /** | ||
| 223 | * ttm_lock_set_kill | ||
| 224 | * | ||
| 225 | * @lock: Pointer to a struct ttm_lock | ||
| 226 | * @val: Boolean whether to kill processes taking the lock. | ||
| 227 | * @signal: Signal to send to the process taking the lock. | ||
| 228 | * | ||
| 229 | * The kill-when-taking-lock functionality is used to kill processes that keep | ||
| 230 | * on using the TTM functionality when its resources have been taken down, for | ||
| 231 | * example when the X server exits. A typical sequence would look like this: | ||
| 232 | * - X server takes lock in write mode. | ||
| 233 | * - ttm_lock_set_kill() is called with @val set to true. | ||
| 234 | * - As part of X server exit, TTM resources are taken down. | ||
| 235 | * - X server releases the lock on file release. | ||
| 236 | * - Another dri client wants to render, takes the lock and is killed. | ||
| 237 | * | ||
| 238 | */ | ||
| 239 | static inline void ttm_lock_set_kill(struct ttm_lock *lock, bool val, | ||
| 240 | int signal) | ||
| 241 | { | ||
| 242 | lock->kill_takers = val; | ||
| 243 | if (val) | ||
| 244 | lock->signal = signal; | ||
| 245 | } | ||
| 246 | |||
| 247 | #endif | ||
diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h index 6983a7cf4da4..b199170b3c2c 100644 --- a/include/drm/ttm/ttm_memory.h +++ b/include/drm/ttm/ttm_memory.h | |||
| @@ -33,6 +33,7 @@ | |||
| 33 | #include <linux/wait.h> | 33 | #include <linux/wait.h> |
| 34 | #include <linux/errno.h> | 34 | #include <linux/errno.h> |
| 35 | #include <linux/kobject.h> | 35 | #include <linux/kobject.h> |
| 36 | #include <linux/mm.h> | ||
| 36 | 37 | ||
| 37 | /** | 38 | /** |
| 38 | * struct ttm_mem_shrink - callback to shrink TTM memory usage. | 39 | * struct ttm_mem_shrink - callback to shrink TTM memory usage. |
diff --git a/include/drm/ttm/ttm_object.h b/include/drm/ttm/ttm_object.h new file mode 100644 index 000000000000..703ca4db0a29 --- /dev/null +++ b/include/drm/ttm/ttm_object.h | |||
| @@ -0,0 +1,267 @@ | |||
| 1 | /************************************************************************** | ||
| 2 | * | ||
| 3 | * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA | ||
| 4 | * All Rights Reserved. | ||
| 5 | * | ||
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 7 | * copy of this software and associated documentation files (the | ||
| 8 | * "Software"), to deal in the Software without restriction, including | ||
| 9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
| 10 | * distribute, sub license, and/or sell copies of the Software, and to | ||
| 11 | * permit persons to whom the Software is furnished to do so, subject to | ||
| 12 | * the following conditions: | ||
| 13 | * | ||
| 14 | * The above copyright notice and this permission notice (including the | ||
| 15 | * next paragraph) shall be included in all copies or substantial portions | ||
| 16 | * of the Software. | ||
| 17 | * | ||
| 18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
| 21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
| 22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
| 23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
| 24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
| 25 | * | ||
| 26 | **************************************************************************/ | ||
| 27 | /* | ||
| 28 | * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> | ||
| 29 | */ | ||
| 30 | /** @file ttm_object.h | ||
| 31 | * | ||
| 32 | * Base- and reference object implementation for the various | ||
| 33 | * ttm objects. Implements reference counting, minimal security checks | ||
| 34 | * and release on file close. | ||
| 35 | */ | ||
| 36 | |||
| 37 | #ifndef _TTM_OBJECT_H_ | ||
| 38 | #define _TTM_OBJECT_H_ | ||
| 39 | |||
| 40 | #include <linux/list.h> | ||
| 41 | #include "drm_hashtab.h" | ||
| 42 | #include <linux/kref.h> | ||
| 43 | #include <ttm/ttm_memory.h> | ||
| 44 | |||
| 45 | /** | ||
| 46 | * enum ttm_ref_type | ||
| 47 | * | ||
| 48 | * Describes what type of reference a ref object holds. | ||
| 49 | * | ||
| 50 | * TTM_REF_USAGE is a simple refcount on a base object. | ||
| 51 | * | ||
| 52 | * TTM_REF_SYNCCPU_READ is a SYNCCPU_READ reference on a | ||
| 53 | * buffer object. | ||
| 54 | * | ||
| 55 | * TTM_REF_SYNCCPU_WRITE is a SYNCCPU_WRITE reference on a | ||
| 56 | * buffer object. | ||
| 57 | * | ||
| 58 | */ | ||
| 59 | |||
| 60 | enum ttm_ref_type { | ||
| 61 | TTM_REF_USAGE, | ||
| 62 | TTM_REF_SYNCCPU_READ, | ||
| 63 | TTM_REF_SYNCCPU_WRITE, | ||
| 64 | TTM_REF_NUM | ||
| 65 | }; | ||
| 66 | |||
| 67 | /** | ||
| 68 | * enum ttm_object_type | ||
| 69 | * | ||
| 70 | * One entry per ttm object type. | ||
| 71 | * Device-specific types should use the | ||
| 72 | * ttm_driver_typex types. | ||
| 73 | */ | ||
| 74 | |||
| 75 | enum ttm_object_type { | ||
| 76 | ttm_fence_type, | ||
| 77 | ttm_buffer_type, | ||
| 78 | ttm_lock_type, | ||
| 79 | ttm_driver_type0 = 256, | ||
| 80 | ttm_driver_type1 | ||
| 81 | }; | ||
| 82 | |||
| 83 | struct ttm_object_file; | ||
| 84 | struct ttm_object_device; | ||
| 85 | |||
| 86 | /** | ||
| 87 | * struct ttm_base_object | ||
| 88 | * | ||
| 89 | * @hash: hash entry for the per-device object hash. | ||
| 90 | * @type: derived type this object is base class for. | ||
| 91 | * @shareable: Other ttm_object_files can access this object. | ||
| 92 | * | ||
| 93 | * @tfile: Pointer to ttm_object_file of the creator. | ||
| 94 | * NULL if the object was not created by a user request. | ||
| 95 | * (kernel object). | ||
| 96 | * | ||
| 97 | * @refcount: Number of references to this object, not | ||
| 98 | * including the hash entry. A reference to a base object can | ||
| 99 | * only be held by a ref object. | ||
| 100 | * | ||
| 101 | * @refcount_release: A function to be called when there are | ||
| 102 | * no more references to this object. This function should | ||
| 103 | * destroy the object (or make sure destruction eventually happens), | ||
| 104 | * and when it is called, the object has | ||
| 105 | * already been taken out of the per-device hash. The parameter | ||
| 106 | * "base" should be set to NULL by the function. | ||
| 107 | * | ||
| 108 | * @ref_obj_release: A function to be called when a reference object | ||
| 109 | * with another ttm_ref_type than TTM_REF_USAGE is deleted. | ||
| 110 | * this function may, for example, release a lock held by a user-space | ||
| 111 | * process. | ||
| 112 | * | ||
| 113 | * This struct is intended to be used as a base struct for objects that | ||
| 114 | * are visible to user-space. It provides a global name, race-safe | ||
| 115 | * access and refcounting, minimal access control and hooks for unref actions. | ||
| 116 | */ | ||
| 117 | |||
| 118 | struct ttm_base_object { | ||
| 119 | struct drm_hash_item hash; | ||
| 120 | enum ttm_object_type object_type; | ||
| 121 | bool shareable; | ||
| 122 | struct ttm_object_file *tfile; | ||
| 123 | struct kref refcount; | ||
| 124 | void (*refcount_release) (struct ttm_base_object **base); | ||
| 125 | void (*ref_obj_release) (struct ttm_base_object *base, | ||
| 126 | enum ttm_ref_type ref_type); | ||
| 127 | }; | ||
| 128 | |||
| 129 | /** | ||
| 130 | * ttm_base_object_init | ||
| 131 | * | ||
| 132 | * @tfile: Pointer to a struct ttm_object_file. | ||
| 133 | * @base: The struct ttm_base_object to initialize. | ||
| 134 | * @shareable: This object is shareable with other applications. | ||
| 135 | * (different @tfile pointers.) | ||
| 136 | * @type: The object type. | ||
| 137 | * @refcount_release: See the struct ttm_base_object description. | ||
| 138 | * @ref_obj_release: See the struct ttm_base_object description. | ||
| 139 | * | ||
| 140 | * Initializes a struct ttm_base_object. | ||
| 141 | */ | ||
| 142 | |||
| 143 | extern int ttm_base_object_init(struct ttm_object_file *tfile, | ||
| 144 | struct ttm_base_object *base, | ||
| 145 | bool shareable, | ||
| 146 | enum ttm_object_type type, | ||
| 147 | void (*refcount_release) (struct ttm_base_object | ||
| 148 | **), | ||
| 149 | void (*ref_obj_release) (struct ttm_base_object | ||
| 150 | *, | ||
| 151 | enum ttm_ref_type | ||
| 152 | ref_type)); | ||
| 153 | |||
| 154 | /** | ||
| 155 | * ttm_base_object_lookup | ||
| 156 | * | ||
| 157 | * @tfile: Pointer to a struct ttm_object_file. | ||
| 158 | * @key: Hash key | ||
| 159 | * | ||
| 160 | * Looks up a struct ttm_base_object with the key @key. | ||
| 161 | * Also verifies that the object is visible to the application, by | ||
| 162 | * comparing the @tfile argument and checking the object shareable flag. | ||
| 163 | */ | ||
| 164 | |||
| 165 | extern struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file | ||
| 166 | *tfile, uint32_t key); | ||
| 167 | |||
| 168 | /** | ||
| 169 | * ttm_base_object_unref | ||
| 170 | * | ||
| 171 | * @p_base: Pointer to a pointer referencing a struct ttm_base_object. | ||
| 172 | * | ||
| 173 | * Decrements the base object refcount and clears the pointer pointed to by | ||
| 174 | * p_base. | ||
| 175 | */ | ||
| 176 | |||
| 177 | extern void ttm_base_object_unref(struct ttm_base_object **p_base); | ||
| 178 | |||
| 179 | /** | ||
| 180 | * ttm_ref_object_add. | ||
| 181 | * | ||
| 182 | * @tfile: A struct ttm_object_file representing the application owning the | ||
| 183 | * ref_object. | ||
| 184 | * @base: The base object to reference. | ||
| 185 | * @ref_type: The type of reference. | ||
| 186 | * @existed: Upon completion, indicates that an identical reference object | ||
| 187 | * already existed, and the refcount was upped on that object instead. | ||
| 188 | * | ||
| 189 | * Adding a ref object to a base object is basically like referencing the | ||
| 190 | * base object, but a user-space application holds the reference. When the | ||
| 191 | * file corresponding to @tfile is closed, all its reference objects are | ||
| 192 | * deleted. A reference object can have different types depending on what | ||
| 193 | * it's intended for. It can be refcounting to prevent object destruction, | ||
| 194 | * When user-space takes a lock, it can add a ref object to that lock to | ||
| 195 | * make sure the lock is released if the application dies. A ref object | ||
| 196 | * will hold a single reference on a base object. | ||
| 197 | */ | ||
| 198 | extern int ttm_ref_object_add(struct ttm_object_file *tfile, | ||
| 199 | struct ttm_base_object *base, | ||
| 200 | enum ttm_ref_type ref_type, bool *existed); | ||
| 201 | /** | ||
| 202 | * ttm_ref_object_base_unref | ||
| 203 | * | ||
| 204 | * @key: Key representing the base object. | ||
| 205 | * @ref_type: Ref type of the ref object to be dereferenced. | ||
| 206 | * | ||
| 207 | * Unreference a ref object with type @ref_type | ||
| 208 | * on the base object identified by @key. If there are no duplicate | ||
| 209 | * references, the ref object will be destroyed and the base object | ||
| 210 | * will be unreferenced. | ||
| 211 | */ | ||
| 212 | extern int ttm_ref_object_base_unref(struct ttm_object_file *tfile, | ||
| 213 | unsigned long key, | ||
| 214 | enum ttm_ref_type ref_type); | ||
| 215 | |||
| 216 | /** | ||
| 217 | * ttm_object_file_init - initialize a struct ttm_object_file | ||
| 218 | * | ||
| 219 | * @tdev: A struct ttm_object device this file is initialized on. | ||
| 220 | * @hash_order: Order of the hash table used to hold the reference objects. | ||
| 221 | * | ||
| 222 | * This is typically called by the file_ops::open function. | ||
| 223 | */ | ||
| 224 | |||
| 225 | extern struct ttm_object_file *ttm_object_file_init(struct ttm_object_device | ||
| 226 | *tdev, | ||
| 227 | unsigned int hash_order); | ||
| 228 | |||
| 229 | /** | ||
| 230 | * ttm_object_file_release - release data held by a ttm_object_file | ||
| 231 | * | ||
| 232 | * @p_tfile: Pointer to pointer to the ttm_object_file object to release. | ||
| 233 | * *p_tfile will be set to NULL by this function. | ||
| 234 | * | ||
| 235 | * Releases all data associated by a ttm_object_file. | ||
| 236 | * Typically called from file_ops::release. The caller must | ||
| 237 | * ensure that there are no concurrent users of tfile. | ||
| 238 | */ | ||
| 239 | |||
| 240 | extern void ttm_object_file_release(struct ttm_object_file **p_tfile); | ||
| 241 | |||
| 242 | /** | ||
| 243 | * ttm_object_device_init - initialize a struct ttm_object_device | ||
| 244 | * | ||
| 245 | * @hash_order: Order of hash table used to hash the base objects. | ||
| 246 | * | ||
| 247 | * This function is typically called on device initialization to prepare | ||
| 248 | * data structures needed for ttm base and ref objects. | ||
| 249 | */ | ||
| 250 | |||
| 251 | extern struct ttm_object_device *ttm_object_device_init | ||
| 252 | (struct ttm_mem_global *mem_glob, unsigned int hash_order); | ||
| 253 | |||
| 254 | /** | ||
| 255 | * ttm_object_device_release - release data held by a ttm_object_device | ||
| 256 | * | ||
| 257 | * @p_tdev: Pointer to pointer to the ttm_object_device object to release. | ||
| 258 | * *p_tdev will be set to NULL by this function. | ||
| 259 | * | ||
| 260 | * Releases all data associated by a ttm_object_device. | ||
| 261 | * Typically called from driver::unload before the destruction of the | ||
| 262 | * device private data structure. | ||
| 263 | */ | ||
| 264 | |||
| 265 | extern void ttm_object_device_release(struct ttm_object_device **p_tdev); | ||
| 266 | |||
| 267 | #endif | ||
diff --git a/include/drm/via_drm.h b/include/drm/via_drm.h index 170786e5c2ff..fd11a5bd892d 100644 --- a/include/drm/via_drm.h +++ b/include/drm/via_drm.h | |||
| @@ -24,7 +24,7 @@ | |||
| 24 | #ifndef _VIA_DRM_H_ | 24 | #ifndef _VIA_DRM_H_ |
| 25 | #define _VIA_DRM_H_ | 25 | #define _VIA_DRM_H_ |
| 26 | 26 | ||
| 27 | #include <linux/types.h> | 27 | #include "drm.h" |
| 28 | 28 | ||
| 29 | /* WARNING: These defines must be the same as what the Xserver uses. | 29 | /* WARNING: These defines must be the same as what the Xserver uses. |
| 30 | * if you change them, you must change the defines in the Xserver. | 30 | * if you change them, you must change the defines in the Xserver. |
