22 files changed, 7915 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 91567ac806f1..bc14ba7c3b6f 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -30,4 +30,5 @@ obj-$(CONFIG_DRM_I830) += i830/
 obj-$(CONFIG_DRM_I915) += i915/
 obj-$(CONFIG_DRM_SIS) += sis/
 obj-$(CONFIG_DRM_SAVAGE)+= savage/
+obj-$(CONFIG_DRM_VMWGFX)+= vmwgfx/
 obj-$(CONFIG_DRM_VIA) +=via/
diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig
new file mode 100644
index 000000000000..f20b8bcbef39
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/Kconfig
@@ -0,0 +1,13 @@
config DRM_VMWGFX
	tristate "DRM driver for VMware Virtual GPU"
	depends on DRM && PCI
	select FB_DEFERRED_IO
	select FB_CFB_FILLRECT
	select FB_CFB_COPYAREA
	select FB_CFB_IMAGEBLIT
	select DRM_TTM
	help
	  KMS enabled DRM driver for SVGA2 virtual hardware.

	  If unsure say n. The compiled module will be
	  called vmwgfx.ko
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
new file mode 100644
index 000000000000..1a3cb6816d1c
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -0,0 +1,9 @@

ccflags-y := -Iinclude/drm

vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
	    vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \
	    vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
	    vmwgfx_overlay.o

obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
new file mode 100644
index 000000000000..d6f2d2b882e9
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -0,0 +1,229 @@
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"

static uint32_t vram_placement_flags = TTM_PL_FLAG_VRAM |
	TTM_PL_FLAG_CACHED;

static uint32_t vram_ne_placement_flags = TTM_PL_FLAG_VRAM |
	TTM_PL_FLAG_CACHED |
	TTM_PL_FLAG_NO_EVICT;

static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM |
	TTM_PL_FLAG_CACHED;

struct ttm_placement vmw_vram_placement = {
	.fpfn = 0,
	.lpfn = 0,
	.num_placement = 1,
	.placement = &vram_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &vram_placement_flags
};

struct ttm_placement vmw_vram_ne_placement = {
	.fpfn = 0,
	.lpfn = 0,
	.num_placement = 1,
	.placement = &vram_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &vram_ne_placement_flags
};

struct ttm_placement vmw_sys_placement = {
	.fpfn = 0,
	.lpfn = 0,
	.num_placement = 1,
	.placement = &sys_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct vmw_ttm_backend {
	struct ttm_backend backend;
};

static int vmw_ttm_populate(struct ttm_backend *backend,
			    unsigned long num_pages, struct page **pages,
			    struct page *dummy_read_page)
{
	return 0;
}

static int vmw_ttm_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
{
	return 0;
}

static int vmw_ttm_unbind(struct ttm_backend *backend)
{
	return 0;
}

static void vmw_ttm_clear(struct ttm_backend *backend)
{
}

static void vmw_ttm_destroy(struct ttm_backend *backend)
{
	struct vmw_ttm_backend *vmw_be =
	    container_of(backend, struct vmw_ttm_backend, backend);

	kfree(vmw_be);
}

static struct ttm_backend_func vmw_ttm_func = {
	.populate = vmw_ttm_populate,
	.clear = vmw_ttm_clear,
	.bind = vmw_ttm_bind,
	.unbind = vmw_ttm_unbind,
	.destroy = vmw_ttm_destroy,
};

struct ttm_backend *vmw_ttm_backend_init(struct ttm_bo_device *bdev)
{
	struct vmw_ttm_backend *vmw_be;

	vmw_be = kmalloc(sizeof(*vmw_be), GFP_KERNEL);
	if (!vmw_be)
		return NULL;

	vmw_be->backend.func = &vmw_ttm_func;

	return &vmw_be->backend;
}

int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	return 0;
}

int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
		      struct ttm_mem_type_manager *man)
{
	struct vmw_private *dev_priv =
	    container_of(bdev, struct vmw_private, bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */

		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->gpu_offset = 0;
		man->io_offset = dev_priv->vram_start;
		man->io_size = dev_priv->vram_size;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
		    TTM_MEMTYPE_FLAG_NEEDS_IOREMAP | TTM_MEMTYPE_FLAG_MAPPABLE;
		man->io_addr = NULL;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

void vmw_evict_flags(struct ttm_buffer_object *bo,
		     struct ttm_placement *placement)
{
	*placement = vmw_sys_placement;
}

/**
 * FIXME: Proper access checks on buffers.
 */

static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	return 0;
}

/**
 * FIXME: We're using the old vmware polling method to sync.
 * Do this with fences instead.
 */

static void *vmw_sync_obj_ref(void *sync_obj)
{
	return sync_obj;
}

static void vmw_sync_obj_unref(void **sync_obj)
{
	*sync_obj = NULL;
}

static int vmw_sync_obj_flush(void *sync_obj, void *sync_arg)
{
	struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;

	mutex_lock(&dev_priv->hw_mutex);
	vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
	mutex_unlock(&dev_priv->hw_mutex);
	return 0;
}

static bool vmw_sync_obj_signaled(void *sync_obj, void *sync_arg)
{
	struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;
	uint32_t sequence = (unsigned long) sync_obj;

	return vmw_fence_signaled(dev_priv, sequence);
}

static int vmw_sync_obj_wait(void *sync_obj, void *sync_arg,
			     bool lazy, bool interruptible)
{
	struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;
	uint32_t sequence = (unsigned long) sync_obj;

	return vmw_wait_fence(dev_priv, false, sequence, false, 3*HZ);
}
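
Note how the sync_obj handle is used above: the driver never allocates a fence
object, it smuggles the 32-bit fence sequence directly through TTM's opaque
void * by casting. A minimal standalone sketch of that round trip (userspace
C, illustration only, not part of the patch):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t sequence = 42;

		/* Store the sequence in the opaque handle, as the
		 * vmw_sync_obj_* callbacks above effectively do. */
		void *sync_obj = (void *)(unsigned long)sequence;

		/* Recover it, as vmw_sync_obj_signaled()/_wait() do. */
		uint32_t recovered = (uint32_t)(unsigned long)sync_obj;

		printf("sequence %u -> handle %p -> %u\n",
		       sequence, sync_obj, recovered);
		return 0;
	}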

struct ttm_bo_driver vmw_bo_driver = {
	.create_ttm_backend_entry = vmw_ttm_backend_init,
	.invalidate_caches = vmw_invalidate_caches,
	.init_mem_type = vmw_init_mem_type,
	.evict_flags = vmw_evict_flags,
	.move = NULL,
	.verify_access = vmw_verify_access,
	.sync_obj_signaled = vmw_sync_obj_signaled,
	.sync_obj_wait = vmw_sync_obj_wait,
	.sync_obj_flush = vmw_sync_obj_flush,
	.sync_obj_unref = vmw_sync_obj_unref,
	.sync_obj_ref = vmw_sync_obj_ref
};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
new file mode 100644
index 000000000000..7b48bb3b63b2
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -0,0 +1,735 @@
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "drmP.h"
#include "vmwgfx_drv.h"
#include "ttm/ttm_placement.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_object.h"
#include "ttm/ttm_module.h"

#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
#define VMWGFX_CHIP_SVGAII 0
#define VMW_FB_RESERVATION 0

/**
 * Fully encoded drm commands. Might move to vmw_drm.h
 */

#define DRM_IOCTL_VMW_GET_PARAM \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM, \
		 struct drm_vmw_getparam_arg)
#define DRM_IOCTL_VMW_ALLOC_DMABUF \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF, \
		 union drm_vmw_alloc_dmabuf_arg)
#define DRM_IOCTL_VMW_UNREF_DMABUF \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF, \
		struct drm_vmw_unref_dmabuf_arg)
#define DRM_IOCTL_VMW_CURSOR_BYPASS \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS, \
		struct drm_vmw_cursor_bypass_arg)

#define DRM_IOCTL_VMW_CONTROL_STREAM \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM, \
		struct drm_vmw_control_stream_arg)
#define DRM_IOCTL_VMW_CLAIM_STREAM \
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM, \
		struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_UNREF_STREAM \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM, \
		struct drm_vmw_stream_arg)

#define DRM_IOCTL_VMW_CREATE_CONTEXT \
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT, \
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_UNREF_CONTEXT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT, \
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_CREATE_SURFACE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE, \
		 union drm_vmw_surface_create_arg)
#define DRM_IOCTL_VMW_UNREF_SURFACE \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE, \
		struct drm_vmw_surface_arg)
#define DRM_IOCTL_VMW_REF_SURFACE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE, \
		 union drm_vmw_surface_reference_arg)
#define DRM_IOCTL_VMW_EXECBUF \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF, \
		struct drm_vmw_execbuf_arg)
#define DRM_IOCTL_VMW_FIFO_DEBUG \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FIFO_DEBUG, \
		 struct drm_vmw_fifo_debug_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \
		 struct drm_vmw_fence_wait_arg)


/**
 * The core DRM version of this macro doesn't account for
 * DRM_COMMAND_BASE.
 */

#define VMW_IOCTL_DEF(ioctl, func, flags) \
	[DRM_IOCTL_NR(ioctl) - DRM_COMMAND_BASE] = {ioctl, flags, func}

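The designated-initializer index in VMW_IOCTL_DEF() is what places each entry
at its driver-local slot, so dispatch can use plain array indexing. A minimal
sketch of the lookup this enables (mirroring what vmw_unlocked_ioctl() does
further down; DRM_COMMAND_BASE comes from drm.h, illustration only):

	unsigned int nr = DRM_IOCTL_NR(DRM_IOCTL_VMW_GET_PARAM);
	struct drm_ioctl_desc *ioctl = &vmw_ioctls[nr - DRM_COMMAND_BASE];
	/* ioctl->func is vmw_getparam_ioctl; ioctl->cmd holds the full
	 * command word and is checked against the incoming cmd. */
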
/**
 * Ioctl definitions.
 */

static struct drm_ioctl_desc vmw_ioctls[] = {
	VMW_IOCTL_DEF(DRM_IOCTL_VMW_GET_PARAM, vmw_getparam_ioctl, 0),
	VMW_IOCTL_DEF(DRM_IOCTL_VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
		      0),
	VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
		      0),
	VMW_IOCTL_DEF(DRM_IOCTL_VMW_CURSOR_BYPASS,
		      vmw_kms_cursor_bypass_ioctl, 0),

	VMW_IOCTL_DEF(DRM_IOCTL_VMW_CONTROL_STREAM, vmw_overlay_ioctl,
		      0),
	VMW_IOCTL_DEF(DRM_IOCTL_VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
		      0),
	VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
		      0),

	VMW_IOCTL_DEF(DRM_IOCTL_VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
		      0),
	VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
		      0),
	VMW_IOCTL_DEF(DRM_IOCTL_VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
		      0),
	VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
		      0),
	VMW_IOCTL_DEF(DRM_IOCTL_VMW_REF_SURFACE, vmw_surface_reference_ioctl,
		      0),
	VMW_IOCTL_DEF(DRM_IOCTL_VMW_EXECBUF, vmw_execbuf_ioctl,
		      0),
	VMW_IOCTL_DEF(DRM_IOCTL_VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl,
		      0),
	VMW_IOCTL_DEF(DRM_IOCTL_VMW_FENCE_WAIT, vmw_fence_wait_ioctl,
		      0)
};

static struct pci_device_id vmw_pci_id_list[] = {
	{0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
	{0, 0, 0}
};

static char *vmw_devname = "vmwgfx";

static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static void vmw_master_init(struct vmw_master *);

static void vmw_print_capabilities(uint32_t capabilities)
{
	DRM_INFO("Capabilities:\n");
	if (capabilities & SVGA_CAP_RECT_COPY)
		DRM_INFO("  Rect copy.\n");
	if (capabilities & SVGA_CAP_CURSOR)
		DRM_INFO("  Cursor.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS)
		DRM_INFO("  Cursor bypass.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
		DRM_INFO("  Cursor bypass 2.\n");
	if (capabilities & SVGA_CAP_8BIT_EMULATION)
		DRM_INFO("  8bit emulation.\n");
	if (capabilities & SVGA_CAP_ALPHA_CURSOR)
		DRM_INFO("  Alpha cursor.\n");
	if (capabilities & SVGA_CAP_3D)
		DRM_INFO("  3D.\n");
	if (capabilities & SVGA_CAP_EXTENDED_FIFO)
		DRM_INFO("  Extended Fifo.\n");
	if (capabilities & SVGA_CAP_MULTIMON)
		DRM_INFO("  Multimon.\n");
	if (capabilities & SVGA_CAP_PITCHLOCK)
		DRM_INFO("  Pitchlock.\n");
	if (capabilities & SVGA_CAP_IRQMASK)
		DRM_INFO("  Irq mask.\n");
	if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
		DRM_INFO("  Display Topology.\n");
	if (capabilities & SVGA_CAP_GMR)
		DRM_INFO("  GMR.\n");
	if (capabilities & SVGA_CAP_TRACES)
		DRM_INFO("  Traces.\n");
}

static int vmw_request_device(struct vmw_private *dev_priv)
{
	int ret;

	vmw_kms_save_vga(dev_priv);

	ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to initialize FIFO.\n");
		return ret;
	}

	return 0;
}

static void vmw_release_device(struct vmw_private *dev_priv)
{
	vmw_fifo_release(dev_priv, &dev_priv->fifo);
	vmw_kms_restore_vga(dev_priv);
}


static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
	struct vmw_private *dev_priv;
	int ret;

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (unlikely(dev_priv == NULL)) {
		DRM_ERROR("Failed allocating a device private struct.\n");
		return -ENOMEM;
	}
	memset(dev_priv, 0, sizeof(*dev_priv));

	dev_priv->dev = dev;
	dev_priv->vmw_chipset = chipset;
	mutex_init(&dev_priv->hw_mutex);
	mutex_init(&dev_priv->cmdbuf_mutex);
	rwlock_init(&dev_priv->resource_lock);
	idr_init(&dev_priv->context_idr);
	idr_init(&dev_priv->surface_idr);
	idr_init(&dev_priv->stream_idr);
	ida_init(&dev_priv->gmr_ida);
	mutex_init(&dev_priv->init_mutex);
	init_waitqueue_head(&dev_priv->fence_queue);
	init_waitqueue_head(&dev_priv->fifo_queue);
	atomic_set(&dev_priv->fence_queue_waiters, 0);
	atomic_set(&dev_priv->fifo_queue_waiters, 0);
	INIT_LIST_HEAD(&dev_priv->gmr_lru);

	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
	dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

	mutex_lock(&dev_priv->hw_mutex);
	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);

	if (dev_priv->capabilities & SVGA_CAP_GMR) {
		dev_priv->max_gmr_descriptors =
			vmw_read(dev_priv,
				 SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH);
		dev_priv->max_gmr_ids =
			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
	}

	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

	mutex_unlock(&dev_priv->hw_mutex);

	vmw_print_capabilities(dev_priv->capabilities);

	if (dev_priv->capabilities & SVGA_CAP_GMR) {
		DRM_INFO("Max GMR ids is %u\n",
			 (unsigned)dev_priv->max_gmr_ids);
		DRM_INFO("Max GMR descriptors is %u\n",
			 (unsigned)dev_priv->max_gmr_descriptors);
	}
	DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
		 dev_priv->vram_start, dev_priv->vram_size / 1024);
	DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
		 dev_priv->mmio_start, dev_priv->mmio_size / 1024);

	ret = vmw_ttm_global_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;


	vmw_master_init(&dev_priv->fbdev_master);
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	dev_priv->active_master = &dev_priv->fbdev_master;


	ret = ttm_bo_device_init(&dev_priv->bdev,
				 dev_priv->bo_global_ref.ref.object,
				 &vmw_bo_driver, VMWGFX_FILE_PAGE_OFFSET,
				 false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
		goto out_err1;
	}

	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
			     (dev_priv->vram_size >> PAGE_SHIFT));
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
		goto out_err2;
	}

	dev_priv->mmio_mtrr = drm_mtrr_add(dev_priv->mmio_start,
					   dev_priv->mmio_size, DRM_MTRR_WC);

	dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
					 dev_priv->mmio_size);

	if (unlikely(dev_priv->mmio_virt == NULL)) {
		ret = -ENOMEM;
		DRM_ERROR("Failed mapping MMIO.\n");
		goto out_err3;
	}

	dev_priv->tdev = ttm_object_device_init
		(dev_priv->mem_global_ref.object, 12);

	if (unlikely(dev_priv->tdev == NULL)) {
		DRM_ERROR("Unable to initialize TTM object management.\n");
		ret = -ENOMEM;
		goto out_err4;
	}

	dev->dev_private = dev_priv;

	if (!dev->devname)
		dev->devname = vmw_devname;

	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
		ret = drm_irq_install(dev);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed installing irq: %d\n", ret);
			goto out_no_irq;
		}
	}

	ret = pci_request_regions(dev->pdev, "vmwgfx probe");
	dev_priv->stealth = (ret != 0);
	if (dev_priv->stealth) {
		/**
		 * Request at least the mmio PCI resource.
		 */

		DRM_INFO("It appears like vesafb is loaded. "
			 "Ignore above error if any. Entering stealth mode.\n");
		ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
			goto out_no_device;
		}
		vmw_kms_init(dev_priv);
		vmw_overlay_init(dev_priv);
	} else {
		ret = vmw_request_device(dev_priv);
		if (unlikely(ret != 0))
			goto out_no_device;
		vmw_kms_init(dev_priv);
		vmw_overlay_init(dev_priv);
		vmw_fb_init(dev_priv);
	}

	return 0;

out_no_device:
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		drm_irq_uninstall(dev_priv->dev);
	if (dev->devname == vmw_devname)
		dev->devname = NULL;
out_no_irq:
	ttm_object_device_release(&dev_priv->tdev);
out_err4:
	iounmap(dev_priv->mmio_virt);
out_err3:
	drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
		     dev_priv->mmio_size, DRM_MTRR_WC);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_err2:
	(void)ttm_bo_device_release(&dev_priv->bdev);
out_err1:
	vmw_ttm_global_release(dev_priv);
out_err0:
	ida_destroy(&dev_priv->gmr_ida);
	idr_destroy(&dev_priv->surface_idr);
	idr_destroy(&dev_priv->context_idr);
	idr_destroy(&dev_priv->stream_idr);
	kfree(dev_priv);
	return ret;
}

static int vmw_driver_unload(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);

	DRM_INFO(VMWGFX_DRIVER_NAME " unload.\n");

	if (!dev_priv->stealth) {
		vmw_fb_close(dev_priv);
		vmw_kms_close(dev_priv);
		vmw_overlay_close(dev_priv);
		vmw_release_device(dev_priv);
		pci_release_regions(dev->pdev);
	} else {
		vmw_kms_close(dev_priv);
		vmw_overlay_close(dev_priv);
		pci_release_region(dev->pdev, 2);
	}
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		drm_irq_uninstall(dev_priv->dev);
	if (dev->devname == vmw_devname)
		dev->devname = NULL;
	ttm_object_device_release(&dev_priv->tdev);
	iounmap(dev_priv->mmio_virt);
	drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
		     dev_priv->mmio_size, DRM_MTRR_WC);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
	(void)ttm_bo_device_release(&dev_priv->bdev);
	vmw_ttm_global_release(dev_priv);
	ida_destroy(&dev_priv->gmr_ida);
	idr_destroy(&dev_priv->surface_idr);
	idr_destroy(&dev_priv->context_idr);
	idr_destroy(&dev_priv->stream_idr);

	kfree(dev_priv);

	return 0;
}

static void vmw_postclose(struct drm_device *dev,
			  struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp;

	vmw_fp = vmw_fpriv(file_priv);
	ttm_object_file_release(&vmw_fp->tfile);
	if (vmw_fp->locked_master)
		drm_master_put(&vmw_fp->locked_master);
	kfree(vmw_fp);
}

static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp;
	int ret = -ENOMEM;

	vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
	if (unlikely(vmw_fp == NULL))
		return ret;

	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
	if (unlikely(vmw_fp->tfile == NULL))
		goto out_no_tfile;

	file_priv->driver_priv = vmw_fp;

	if (unlikely(dev_priv->bdev.dev_mapping == NULL))
		dev_priv->bdev.dev_mapping =
			file_priv->filp->f_path.dentry->d_inode->i_mapping;

	return 0;

out_no_tfile:
	kfree(vmw_fp);
	return ret;
}

static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
			       unsigned long arg)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	unsigned int nr = DRM_IOCTL_NR(cmd);
	long ret;

	/*
	 * The driver private ioctls and TTM ioctls should be
	 * thread-safe.
	 */

	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
	    && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
		struct drm_ioctl_desc *ioctl =
			&vmw_ioctls[nr - DRM_COMMAND_BASE];

		if (unlikely(ioctl->cmd != cmd)) {
			DRM_ERROR("Invalid command format, ioctl %d\n",
				  nr - DRM_COMMAND_BASE);
			return -EINVAL;
		}
		return drm_ioctl(filp->f_path.dentry->d_inode,
				 filp, cmd, arg);
	}

	/*
	 * Not all old drm ioctls are thread-safe.
	 */

	lock_kernel();
	ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg);
	unlock_kernel();
	return ret;
}

static int vmw_firstopen(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	dev_priv->is_opened = true;

	return 0;
}

static void vmw_lastclose(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_crtc *crtc;
	struct drm_mode_set set;
	int ret;

	/**
	 * Do nothing on the lastclose call from drm_unload.
	 */

	if (!dev_priv->is_opened)
		return;

	dev_priv->is_opened = false;
	set.x = 0;
	set.y = 0;
	set.fb = NULL;
	set.mode = NULL;
	set.connectors = NULL;
	set.num_connectors = 0;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		set.crtc = crtc;
		ret = crtc->funcs->set_config(&set);
		WARN_ON(ret != 0);
	}

}

static void vmw_master_init(struct vmw_master *vmaster)
{
	ttm_lock_init(&vmaster->lock);
}

static int vmw_master_create(struct drm_device *dev,
			     struct drm_master *master)
{
	struct vmw_master *vmaster;

	DRM_INFO("Master create.\n");
	vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
	if (unlikely(vmaster == NULL))
		return -ENOMEM;

	ttm_lock_init(&vmaster->lock);
	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
	master->driver_priv = vmaster;

	return 0;
}

static void vmw_master_destroy(struct drm_device *dev,
			       struct drm_master *master)
{
	struct vmw_master *vmaster = vmw_master(master);

	DRM_INFO("Master destroy.\n");
	master->driver_priv = NULL;
	kfree(vmaster);
}


static int vmw_master_set(struct drm_device *dev,
			  struct drm_file *file_priv,
			  bool from_open)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *active = dev_priv->active_master;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret = 0;

	DRM_INFO("Master set.\n");
	if (dev_priv->stealth) {
		ret = vmw_request_device(dev_priv);
		if (unlikely(ret != 0))
			return ret;
	}

	if (active) {
		BUG_ON(active != &dev_priv->fbdev_master);
		ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
		if (unlikely(ret != 0))
			goto out_no_active_lock;

		ttm_lock_set_kill(&active->lock, true, SIGTERM);
		ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Unable to clean VRAM on "
				  "master drop.\n");
		}

		dev_priv->active_master = NULL;
	}

	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
	if (!from_open) {
		ttm_vt_unlock(&vmaster->lock);
		BUG_ON(vmw_fp->locked_master != file_priv->master);
		drm_master_put(&vmw_fp->locked_master);
	}

	dev_priv->active_master = vmaster;

	return 0;

out_no_active_lock:
	vmw_release_device(dev_priv);
	return ret;
}

static void vmw_master_drop(struct drm_device *dev,
			    struct drm_file *file_priv,
			    bool from_release)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	DRM_INFO("Master drop.\n");

	/**
	 * Make sure the master doesn't disappear while we have
	 * it locked.
	 */

	vmw_fp->locked_master = drm_master_get(file_priv->master);
	ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);

	if (unlikely((ret != 0))) {
		DRM_ERROR("Unable to lock TTM at VT switch.\n");
		drm_master_put(&vmw_fp->locked_master);
	}

	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);

	if (dev_priv->stealth) {
		ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
		if (unlikely(ret != 0))
			DRM_ERROR("Unable to clean VRAM on master drop.\n");
		vmw_release_device(dev_priv);
	}
	dev_priv->active_master = &dev_priv->fbdev_master;
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	ttm_vt_unlock(&dev_priv->fbdev_master.lock);

	if (!dev_priv->stealth)
		vmw_fb_on(dev_priv);
}


static void vmw_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	drm_put_dev(dev);
}

static struct drm_driver driver = {
	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
	DRIVER_MODESET,
	.load = vmw_driver_load,
	.unload = vmw_driver_unload,
	.firstopen = vmw_firstopen,
	.lastclose = vmw_lastclose,
	.irq_preinstall = vmw_irq_preinstall,
	.irq_postinstall = vmw_irq_postinstall,
	.irq_uninstall = vmw_irq_uninstall,
	.irq_handler = vmw_irq_handler,
	.reclaim_buffers_locked = NULL,
	.get_map_ofs = drm_core_get_map_ofs,
	.get_reg_ofs = drm_core_get_reg_ofs,
	.ioctls = vmw_ioctls,
	.num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls),
	.dma_quiescent = NULL,	/*vmw_dma_quiescent, */
	.master_create = vmw_master_create,
	.master_destroy = vmw_master_destroy,
	.master_set = vmw_master_set,
	.master_drop = vmw_master_drop,
	.open = vmw_driver_open,
	.postclose = vmw_postclose,
	.fops = {
		.owner = THIS_MODULE,
		.open = drm_open,
		.release = drm_release,
		.unlocked_ioctl = vmw_unlocked_ioctl,
		.mmap = vmw_mmap,
		.poll = drm_poll,
		.fasync = drm_fasync,
#if defined(CONFIG_COMPAT)
		.compat_ioctl = drm_compat_ioctl,
#endif
		},
	.pci_driver = {
		.name = VMWGFX_DRIVER_NAME,
		.id_table = vmw_pci_id_list,
		.probe = vmw_probe,
		.remove = vmw_remove
		},
	.name = VMWGFX_DRIVER_NAME,
	.desc = VMWGFX_DRIVER_DESC,
	.date = VMWGFX_DRIVER_DATE,
	.major = VMWGFX_DRIVER_MAJOR,
	.minor = VMWGFX_DRIVER_MINOR,
	.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};

static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	return drm_get_dev(pdev, ent, &driver);
}

static int __init vmwgfx_init(void)
{
	int ret;
	ret = drm_init(&driver);
	if (ret)
		DRM_ERROR("Failed initializing DRM.\n");
	return ret;
}

static void __exit vmwgfx_exit(void)
{
	drm_exit(&driver);
}

module_init(vmwgfx_init);
module_exit(vmwgfx_exit);

MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
new file mode 100644
index 000000000000..43546d09d1b0
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -0,0 +1,511 @@
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef _VMWGFX_DRV_H_
#define _VMWGFX_DRV_H_

#include "vmwgfx_reg.h"
#include "drmP.h"
#include "vmwgfx_drm.h"
#include "drm_hashtab.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_object.h"
#include "ttm/ttm_lock.h"
#include "ttm/ttm_execbuf_util.h"
#include "ttm/ttm_module.h"

#define VMWGFX_DRIVER_DATE "20090724"
#define VMWGFX_DRIVER_MAJOR 0
#define VMWGFX_DRIVER_MINOR 1
#define VMWGFX_DRIVER_PATCHLEVEL 2
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
#define VMWGFX_MAX_GMRS 2048

struct vmw_fpriv {
	struct drm_master *locked_master;
	struct ttm_object_file *tfile;
};

struct vmw_dma_buffer {
	struct ttm_buffer_object base;
	struct list_head validate_list;
	struct list_head gmr_lru;
	uint32_t gmr_id;
	bool gmr_bound;
	uint32_t cur_validate_node;
	bool on_validate_list;
};

struct vmw_resource {
	struct kref kref;
	struct vmw_private *dev_priv;
	struct idr *idr;
	int id;
	enum ttm_object_type res_type;
	bool avail;
	void (*hw_destroy) (struct vmw_resource *res);
	void (*res_free) (struct vmw_resource *res);

	/* TODO is a generic snooper needed? */
#if 0
	void (*snoop)(struct vmw_resource *res,
		      struct ttm_object_file *tfile,
		      SVGA3dCmdHeader *header);
	void *snoop_priv;
#endif
};

struct vmw_cursor_snooper {
	struct drm_crtc *crtc;
	size_t age;
	uint32_t *image;
};

struct vmw_surface {
	struct vmw_resource res;
	uint32_t flags;
	uint32_t format;
	uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
	struct drm_vmw_size *sizes;
	uint32_t num_sizes;

	/* TODO so far just an extra pointer */
	struct vmw_cursor_snooper snooper;
};

struct vmw_fifo_state {
	unsigned long reserved_size;
	__le32 *dynamic_buffer;
	__le32 *static_buffer;
	__le32 *last_buffer;
	uint32_t last_data_size;
	uint32_t last_buffer_size;
	bool last_buffer_add;
	unsigned long static_buffer_size;
	bool using_bounce_buffer;
	uint32_t capabilities;
	struct rw_semaphore rwsem;
};

struct vmw_relocation {
	SVGAGuestPtr *location;
	uint32_t index;
};

struct vmw_sw_context {
	struct ida bo_list;
	uint32_t last_cid;
	bool cid_valid;
	uint32_t last_sid;
	bool sid_valid;
	struct ttm_object_file *tfile;
	struct list_head validate_nodes;
	struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
	uint32_t cur_reloc;
	struct ttm_validate_buffer val_bufs[VMWGFX_MAX_GMRS];
	uint32_t cur_val_buf;
};

struct vmw_legacy_display;
struct vmw_overlay;

struct vmw_master {
	struct ttm_lock lock;
};

struct vmw_private {
	struct ttm_bo_device bdev;
	struct ttm_bo_global_ref bo_global_ref;
	struct ttm_global_reference mem_global_ref;

	struct vmw_fifo_state fifo;

	struct drm_device *dev;
	unsigned long vmw_chipset;
	unsigned int io_start;
	uint32_t vram_start;
	uint32_t vram_size;
	uint32_t mmio_start;
	uint32_t mmio_size;
	uint32_t fb_max_width;
	uint32_t fb_max_height;
	__le32 __iomem *mmio_virt;
	int mmio_mtrr;
	uint32_t capabilities;
	uint32_t max_gmr_descriptors;
	uint32_t max_gmr_ids;
	struct mutex hw_mutex;

	/*
	 * VGA registers.
	 */

	uint32_t vga_width;
	uint32_t vga_height;
	uint32_t vga_depth;
	uint32_t vga_bpp;
	uint32_t vga_pseudo;
	uint32_t vga_red_mask;
	uint32_t vga_blue_mask;
	uint32_t vga_green_mask;

	/*
	 * Framebuffer info.
	 */

	void *fb_info;
	struct vmw_legacy_display *ldu_priv;
	struct vmw_overlay *overlay_priv;

	/*
	 * Context and surface management.
	 */

	rwlock_t resource_lock;
	struct idr context_idr;
	struct idr surface_idr;
	struct idr stream_idr;

	/*
	 * Block lastclose from racing with firstopen.
	 */

	struct mutex init_mutex;

	/*
	 * A resource manager for kernel-only surfaces and
	 * contexts.
	 */

	struct ttm_object_device *tdev;

	/*
	 * Fencing and IRQs.
	 */

	uint32_t fence_seq;
	wait_queue_head_t fence_queue;
	wait_queue_head_t fifo_queue;
	atomic_t fence_queue_waiters;
	atomic_t fifo_queue_waiters;
	uint32_t last_read_sequence;
	spinlock_t irq_lock;

	/*
	 * Device state
	 */

	uint32_t traces_state;
	uint32_t enable_state;
	uint32_t config_done_state;

	/**
	 * Execbuf
	 */
	/**
	 * Protected by the cmdbuf mutex.
	 */

	struct vmw_sw_context ctx;
	uint32_t val_seq;
	struct mutex cmdbuf_mutex;

	/**
	 * GMR management. Protected by the lru spinlock.
	 */

	struct ida gmr_ida;
	struct list_head gmr_lru;


	/**
	 * Operating mode.
	 */

	bool stealth;
	bool is_opened;

	/**
	 * Master management.
	 */

	struct vmw_master *active_master;
	struct vmw_master fbdev_master;
};

static inline struct vmw_private *vmw_priv(struct drm_device *dev)
{
	return (struct vmw_private *)dev->dev_private;
}

static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
{
	return (struct vmw_fpriv *)file_priv->driver_priv;
}

static inline struct vmw_master *vmw_master(struct drm_master *master)
{
	return (struct vmw_master *) master->driver_priv;
}

static inline void vmw_write(struct vmw_private *dev_priv,
			     unsigned int offset, uint32_t value)
{
	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
	outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
}

static inline uint32_t vmw_read(struct vmw_private *dev_priv,
				unsigned int offset)
{
	uint32_t val;

	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
	val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
	return val;
}
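
Both helpers go through a single shared index/value port pair: the register
offset is first written to VMWGFX_INDEX_PORT and the data is then transferred
through VMWGFX_VALUE_PORT, so one logical register access is two port
operations and is not atomic by itself. Callers therefore hold
dev_priv->hw_mutex around groups of accesses, as vmw_driver_load() and the
sync-object flush in vmwgfx_buffer.c do. A minimal usage sketch following
that pattern (illustration only):

	mutex_lock(&dev_priv->hw_mutex);
	fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
	fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);
	mutex_unlock(&dev_priv->hw_mutex);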

/**
 * GMR utilities - vmwgfx_gmr.c
 */

extern int vmw_gmr_bind(struct vmw_private *dev_priv,
			struct ttm_buffer_object *bo);
extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);

/**
 * Resource utilities - vmwgfx_resource.c
 */

extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv);
extern void vmw_resource_unreference(struct vmw_resource **p_res);
extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv);
extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_context_check(struct vmw_private *dev_priv,
			     struct ttm_object_file *tfile,
			     int id);
extern void vmw_surface_res_free(struct vmw_resource *res);
extern int vmw_surface_init(struct vmw_private *dev_priv,
			    struct vmw_surface *srf,
			    void (*res_free) (struct vmw_resource *res));
extern int vmw_user_surface_lookup(struct vmw_private *dev_priv,
				   struct ttm_object_file *tfile,
				   int sid, struct vmw_surface **out);
extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv);
extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv);
extern int vmw_surface_check(struct vmw_private *dev_priv,
			     struct ttm_object_file *tfile,
			     int id);
extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo);
extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
			   struct vmw_dma_buffer *vmw_bo,
			   size_t size, struct ttm_placement *placement,
			   bool interruptible,
			   void (*bo_free) (struct ttm_buffer_object *bo));
extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
					 uint32_t cur_validate_node);
extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
				  uint32_t id, struct vmw_dma_buffer **out);
extern uint32_t vmw_dmabuf_gmr(struct ttm_buffer_object *bo);
extern void vmw_dmabuf_set_gmr(struct ttm_buffer_object *bo, uint32_t id);
extern int vmw_gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id);
extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
				       struct vmw_dma_buffer *bo);
extern int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
				struct vmw_dma_buffer *bo);
extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
				  struct ttm_object_file *tfile,
				  uint32_t *inout_id,
				  struct vmw_resource **out);


/**
 * Misc Ioctl functionality - vmwgfx_ioctl.c
 */

extern int vmw_getparam_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
extern int vmw_fifo_debug_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);

/**
 * Fifo utilities - vmwgfx_fifo.c
 */

extern int vmw_fifo_init(struct vmw_private *dev_priv,
			 struct vmw_fifo_state *fifo);
extern void vmw_fifo_release(struct vmw_private *dev_priv,
			     struct vmw_fifo_state *fifo);
extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes);
extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
			       uint32_t *sequence);
extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
extern int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma);

/**
 * TTM glue - vmwgfx_ttm_glue.c
 */

extern int vmw_ttm_global_init(struct vmw_private *dev_priv);
extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);

/**
 * TTM buffer object driver - vmwgfx_buffer.c
 */

extern struct ttm_placement vmw_vram_placement;
extern struct ttm_placement vmw_vram_ne_placement;
extern struct ttm_placement vmw_sys_placement;
extern struct ttm_bo_driver vmw_bo_driver;
extern int vmw_dma_quiescent(struct drm_device *dev);

/**
 * Command submission - vmwgfx_execbuf.c
 */

extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);

/**
 * IRQs and waiting - vmwgfx_irq.c
 */

extern irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS);
extern int vmw_wait_fence(struct vmw_private *dev_priv, bool lazy,
			  uint32_t sequence, bool interruptible,
			  unsigned long timeout);
extern void vmw_irq_preinstall(struct drm_device *dev);
extern int vmw_irq_postinstall(struct drm_device *dev);
extern void vmw_irq_uninstall(struct drm_device *dev);
extern bool vmw_fence_signaled(struct vmw_private *dev_priv,
			       uint32_t sequence);
extern int vmw_fence_wait_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
extern int vmw_fallback_wait(struct vmw_private *dev_priv,
			     bool lazy,
			     bool fifo_idle,
			     uint32_t sequence,
			     bool interruptible,
			     unsigned long timeout);

| 435 | /** | ||
| 436 | * Kernel framebuffer - vmwgfx_fb.c | ||
| 437 | */ | ||
| 438 | |||
| 439 | int vmw_fb_init(struct vmw_private *vmw_priv); | ||
| 440 | int vmw_fb_close(struct vmw_private *dev_priv); | ||
| 441 | int vmw_fb_off(struct vmw_private *vmw_priv); | ||
| 442 | int vmw_fb_on(struct vmw_private *vmw_priv); | ||
| 443 | |||
| 444 | /** | ||
| 445 | * Kernel modesetting - vmwgfx_kms.c | ||
| 446 | */ | ||
| 447 | |||
| 448 | int vmw_kms_init(struct vmw_private *dev_priv); | ||
| 449 | int vmw_kms_close(struct vmw_private *dev_priv); | ||
| 450 | int vmw_kms_save_vga(struct vmw_private *vmw_priv); | ||
| 451 | int vmw_kms_restore_vga(struct vmw_private *vmw_priv); | ||
| 452 | int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data, | ||
| 453 | struct drm_file *file_priv); | ||
| 454 | void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv); | ||
| 455 | void vmw_kms_cursor_snoop(struct vmw_surface *srf, | ||
| 456 | struct ttm_object_file *tfile, | ||
| 457 | struct ttm_buffer_object *bo, | ||
| 458 | SVGA3dCmdHeader *header); | ||
| 459 | |||
| 460 | /** | ||
| 461 | * Overlay control - vmwgfx_overlay.c | ||
| 462 | */ | ||
| 463 | |||
| 464 | int vmw_overlay_init(struct vmw_private *dev_priv); | ||
| 465 | int vmw_overlay_close(struct vmw_private *dev_priv); | ||
| 466 | int vmw_overlay_ioctl(struct drm_device *dev, void *data, | ||
| 467 | struct drm_file *file_priv); | ||
| 468 | int vmw_overlay_stop_all(struct vmw_private *dev_priv); | ||
| 469 | int vmw_overlay_resume_all(struct vmw_private *dev_priv); | ||
| 470 | int vmw_overlay_pause_all(struct vmw_private *dev_priv); | ||
| 471 | int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out); | ||
| 472 | int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id); | ||
| 473 | int vmw_overlay_num_overlays(struct vmw_private *dev_priv); | ||
| 474 | int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv); | ||
| 475 | |||
| 476 | /** | ||
| 477 | * Inline helper functions | ||
| 478 | */ | ||
| 479 | |||
| 480 | static inline void vmw_surface_unreference(struct vmw_surface **srf) | ||
| 481 | { | ||
| 482 | struct vmw_surface *tmp_srf = *srf; | ||
| 483 | struct vmw_resource *res = &tmp_srf->res; | ||
| 484 | *srf = NULL; | ||
| 485 | |||
| 486 | vmw_resource_unreference(&res); | ||
| 487 | } | ||
| 488 | |||
| 489 | static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf) | ||
| 490 | { | ||
| 491 | (void) vmw_resource_reference(&srf->res); | ||
| 492 | return srf; | ||
| 493 | } | ||
| 494 | |||
| 495 | static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf) | ||
| 496 | { | ||
| 497 | struct vmw_dma_buffer *tmp_buf = *buf; | ||
| 498 | struct ttm_buffer_object *bo = &tmp_buf->base; | ||
| 499 | *buf = NULL; | ||
| 500 | |||
| 501 | ttm_bo_unref(&bo); | ||
| 502 | } | ||
| 503 | |||
| 504 | static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer *buf) | ||
| 505 | { | ||
| 506 | if (ttm_bo_reference(&buf->base)) | ||
| 507 | return buf; | ||
| 508 | return NULL; | ||
| 509 | } | ||
| 510 | |||
| 511 | #endif | ||
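
A minimal usage sketch of the inline reference helpers above (not part of the patch; "srf" is assumed to come from a successful vmw_user_surface_lookup()):

	struct vmw_surface *hold;

	hold = vmw_surface_reference(srf);	/* take an extra reference */
	/* ... use the surface while holding the reference ... */
	vmw_surface_unreference(&hold);		/* drop it; hold becomes NULL */

The vmw_dmabuf_reference()/vmw_dmabuf_unreference() pair works the same way, delegating to the TTM buffer-object refcount instead of the vmw_resource one.
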
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c new file mode 100644 index 000000000000..7a39f3e6dc2c --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | |||
| @@ -0,0 +1,516 @@ | |||
| 1 | /************************************************************************** | ||
| 2 | * | ||
| 3 | * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA | ||
| 4 | * All Rights Reserved. | ||
| 5 | * | ||
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 7 | * copy of this software and associated documentation files (the | ||
| 8 | * "Software"), to deal in the Software without restriction, including | ||
| 9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
| 10 | * distribute, sub license, and/or sell copies of the Software, and to | ||
| 11 | * permit persons to whom the Software is furnished to do so, subject to | ||
| 12 | * the following conditions: | ||
| 13 | * | ||
| 14 | * The above copyright notice and this permission notice (including the | ||
| 15 | * next paragraph) shall be included in all copies or substantial portions | ||
| 16 | * of the Software. | ||
| 17 | * | ||
| 18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
| 21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
| 22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
| 23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
| 24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
| 25 | * | ||
| 26 | **************************************************************************/ | ||
| 27 | |||
| 28 | #include "vmwgfx_drv.h" | ||
| 29 | #include "vmwgfx_reg.h" | ||
| 30 | #include "ttm/ttm_bo_api.h" | ||
| 31 | #include "ttm/ttm_placement.h" | ||
| 32 | |||
| 33 | static int vmw_cmd_invalid(struct vmw_private *dev_priv, | ||
| 34 | struct vmw_sw_context *sw_context, | ||
| 35 | SVGA3dCmdHeader *header) | ||
| 36 | { | ||
| 37 | return capable(CAP_SYS_ADMIN) ? 0 : -EINVAL; | ||
| 38 | } | ||
| 39 | |||
| 40 | static int vmw_cmd_ok(struct vmw_private *dev_priv, | ||
| 41 | struct vmw_sw_context *sw_context, | ||
| 42 | SVGA3dCmdHeader *header) | ||
| 43 | { | ||
| 44 | return 0; | ||
| 45 | } | ||
| 46 | |||
| 47 | static int vmw_cmd_cid_check(struct vmw_private *dev_priv, | ||
| 48 | struct vmw_sw_context *sw_context, | ||
| 49 | SVGA3dCmdHeader *header) | ||
| 50 | { | ||
| 51 | struct vmw_cid_cmd { | ||
| 52 | SVGA3dCmdHeader header; | ||
| 53 | __le32 cid; | ||
| 54 | } *cmd; | ||
| 55 | int ret; | ||
| 56 | |||
| 57 | cmd = container_of(header, struct vmw_cid_cmd, header); | ||
| 58 | if (likely(sw_context->cid_valid && cmd->cid == sw_context->last_cid)) | ||
| 59 | return 0; | ||
| 60 | |||
| 61 | ret = vmw_context_check(dev_priv, sw_context->tfile, cmd->cid); | ||
| 62 | if (unlikely(ret != 0)) { | ||
| 63 | DRM_ERROR("Could not find or use context %u\n", | ||
| 64 | (unsigned) cmd->cid); | ||
| 65 | return ret; | ||
| 66 | } | ||
| 67 | |||
| 68 | sw_context->last_cid = cmd->cid; | ||
| 69 | sw_context->cid_valid = true; | ||
| 70 | |||
| 71 | return 0; | ||
| 72 | } | ||
| 73 | |||
| 74 | static int vmw_cmd_sid_check(struct vmw_private *dev_priv, | ||
| 75 | struct vmw_sw_context *sw_context, | ||
| 76 | uint32_t sid) | ||
| 77 | { | ||
| 78 | if (unlikely((!sw_context->sid_valid || sid != sw_context->last_sid) && | ||
| 79 | sid != SVGA3D_INVALID_ID)) { | ||
| 80 | int ret = vmw_surface_check(dev_priv, sw_context->tfile, sid); | ||
| 81 | |||
| 82 | if (unlikely(ret != 0)) { | ||
| 83 | DRM_ERROR("Could ot find or use surface %u\n", | ||
| 84 | (unsigned) sid); | ||
| 85 | return ret; | ||
| 86 | } | ||
| 87 | |||
| 88 | sw_context->last_sid = sid; | ||
| 89 | sw_context->sid_valid = true; | ||
| 90 | } | ||
| 91 | return 0; | ||
| 92 | } | ||
| 93 | |||
| 94 | |||
| 95 | static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv, | ||
| 96 | struct vmw_sw_context *sw_context, | ||
| 97 | SVGA3dCmdHeader *header) | ||
| 98 | { | ||
| 99 | struct vmw_sid_cmd { | ||
| 100 | SVGA3dCmdHeader header; | ||
| 101 | SVGA3dCmdSetRenderTarget body; | ||
| 102 | } *cmd; | ||
| 103 | int ret; | ||
| 104 | |||
| 105 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); | ||
| 106 | if (unlikely(ret != 0)) | ||
| 107 | return ret; | ||
| 108 | |||
| 109 | cmd = container_of(header, struct vmw_sid_cmd, header); | ||
| 110 | return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.target.sid); | ||
| 111 | } | ||
| 112 | |||
| 113 | static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv, | ||
| 114 | struct vmw_sw_context *sw_context, | ||
| 115 | SVGA3dCmdHeader *header) | ||
| 116 | { | ||
| 117 | struct vmw_sid_cmd { | ||
| 118 | SVGA3dCmdHeader header; | ||
| 119 | SVGA3dCmdSurfaceCopy body; | ||
| 120 | } *cmd; | ||
| 121 | int ret; | ||
| 122 | |||
| 123 | cmd = container_of(header, struct vmw_sid_cmd, header); | ||
| 124 | ret = vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.src.sid); | ||
| 125 | if (unlikely(ret != 0)) | ||
| 126 | return ret; | ||
| 127 | return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.dest.sid); | ||
| 128 | } | ||
| 129 | |||
| 130 | static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv, | ||
| 131 | struct vmw_sw_context *sw_context, | ||
| 132 | SVGA3dCmdHeader *header) | ||
| 133 | { | ||
| 134 | struct vmw_sid_cmd { | ||
| 135 | SVGA3dCmdHeader header; | ||
| 136 | SVGA3dCmdSurfaceStretchBlt body; | ||
| 137 | } *cmd; | ||
| 138 | int ret; | ||
| 139 | |||
| 140 | cmd = container_of(header, struct vmw_sid_cmd, header); | ||
| 141 | ret = vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.src.sid); | ||
| 142 | if (unlikely(ret != 0)) | ||
| 143 | return ret; | ||
| 144 | return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.dest.sid); | ||
| 145 | } | ||
| 146 | |||
| 147 | static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv, | ||
| 148 | struct vmw_sw_context *sw_context, | ||
| 149 | SVGA3dCmdHeader *header) | ||
| 150 | { | ||
| 151 | struct vmw_sid_cmd { | ||
| 152 | SVGA3dCmdHeader header; | ||
| 153 | SVGA3dCmdBlitSurfaceToScreen body; | ||
| 154 | } *cmd; | ||
| 155 | |||
| 156 | cmd = container_of(header, struct vmw_sid_cmd, header); | ||
| 157 | return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.srcImage.sid); | ||
| 158 | } | ||
| 159 | |||
| 160 | static int vmw_cmd_present_check(struct vmw_private *dev_priv, | ||
| 161 | struct vmw_sw_context *sw_context, | ||
| 162 | SVGA3dCmdHeader *header) | ||
| 163 | { | ||
| 164 | struct vmw_sid_cmd { | ||
| 165 | SVGA3dCmdHeader header; | ||
| 166 | SVGA3dCmdPresent body; | ||
| 167 | } *cmd; | ||
| 168 | |||
| 169 | cmd = container_of(header, struct vmw_sid_cmd, header); | ||
| 170 | return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.sid); | ||
| 171 | } | ||
| 172 | |||
| 173 | static int vmw_cmd_dma(struct vmw_private *dev_priv, | ||
| 174 | struct vmw_sw_context *sw_context, | ||
| 175 | SVGA3dCmdHeader *header) | ||
| 176 | { | ||
| 177 | uint32_t handle; | ||
| 178 | struct vmw_dma_buffer *vmw_bo = NULL; | ||
| 179 | struct ttm_buffer_object *bo; | ||
| 180 | struct vmw_surface *srf = NULL; | ||
| 181 | struct vmw_dma_cmd { | ||
| 182 | SVGA3dCmdHeader header; | ||
| 183 | SVGA3dCmdSurfaceDMA dma; | ||
| 184 | } *cmd; | ||
| 185 | struct vmw_relocation *reloc; | ||
| 186 | int ret; | ||
| 187 | uint32_t cur_validate_node; | ||
| 188 | struct ttm_validate_buffer *val_buf; | ||
| 189 | |||
| 190 | |||
| 191 | cmd = container_of(header, struct vmw_dma_cmd, header); | ||
| 192 | ret = vmw_cmd_sid_check(dev_priv, sw_context, cmd->dma.host.sid); | ||
| 193 | if (unlikely(ret != 0)) | ||
| 194 | return ret; | ||
| 195 | |||
| 196 | handle = cmd->dma.guest.ptr.gmrId; | ||
| 197 | ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo); | ||
| 198 | if (unlikely(ret != 0)) { | ||
| 199 | DRM_ERROR("Could not find or use GMR region.\n"); | ||
| 200 | return -EINVAL; | ||
| 201 | } | ||
| 202 | bo = &vmw_bo->base; | ||
| 203 | |||
| 204 | if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) { | ||
| 205 | DRM_ERROR("Max number of DMA commands per submission" | ||
| 206 | " exceeded\n"); | ||
| 207 | ret = -EINVAL; | ||
| 208 | goto out_no_reloc; | ||
| 209 | } | ||
| 210 | |||
| 211 | reloc = &sw_context->relocs[sw_context->cur_reloc++]; | ||
| 212 | reloc->location = &cmd->dma.guest.ptr; | ||
| 213 | |||
| 214 | cur_validate_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf); | ||
| 215 | if (unlikely(cur_validate_node >= VMWGFX_MAX_GMRS)) { | ||
| 216 | DRM_ERROR("Max number of DMA buffers per submission" | ||
| 217 | " exceeded.\n"); | ||
| 218 | ret = -EINVAL; | ||
| 219 | goto out_no_reloc; | ||
| 220 | } | ||
| 221 | |||
| 222 | reloc->index = cur_validate_node; | ||
| 223 | if (unlikely(cur_validate_node == sw_context->cur_val_buf)) { | ||
| 224 | val_buf = &sw_context->val_bufs[cur_validate_node]; | ||
| 225 | val_buf->bo = ttm_bo_reference(bo); | ||
| 226 | val_buf->new_sync_obj_arg = (void *) dev_priv; | ||
| 227 | list_add_tail(&val_buf->head, &sw_context->validate_nodes); | ||
| 228 | ++sw_context->cur_val_buf; | ||
| 229 | } | ||
| 230 | |||
| 231 | ret = vmw_user_surface_lookup(dev_priv, sw_context->tfile, | ||
| 232 | cmd->dma.host.sid, &srf); | ||
| 233 | if (ret) { | ||
| 234 | DRM_ERROR("could not find surface\n"); | ||
| 235 | goto out_no_reloc; | ||
| 236 | } | ||
| 237 | |||
| 238 | vmw_kms_cursor_snoop(srf, sw_context->tfile, bo, header); | ||
| 239 | vmw_surface_unreference(&srf); | ||
| 240 | |||
| 241 | out_no_reloc: | ||
| 242 | vmw_dmabuf_unreference(&vmw_bo); | ||
| 243 | return ret; | ||
| 244 | } | ||
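
A reading aid for the validation-node handling above; the dedup behaviour of vmw_dmabuf_validate_node() is inferred from how its return value is used here:

	/*
	 * vmw_dmabuf_validate_node() appears to return the index of an
	 * already-registered node when the same buffer object shows up in
	 * several SURFACE_DMA packets: the node is only initialized and
	 * put on the validate list when its index equals cur_val_buf,
	 * i.e. when it is new.  Each buffer is thus reserved and
	 * validated once per submission, while every relocation still
	 * records its own index into val_bufs[].
	 */
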
| 245 | |||
| 246 | |||
| 247 | typedef int (*vmw_cmd_func) (struct vmw_private *, | ||
| 248 | struct vmw_sw_context *, | ||
| 249 | SVGA3dCmdHeader *); | ||
| 250 | |||
| 251 | #define VMW_CMD_DEF(cmd, func) \ | ||
| 252 | [cmd - SVGA_3D_CMD_BASE] = func | ||
| 253 | |||
| 254 | static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = { | ||
| 255 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid), | ||
| 256 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid), | ||
| 257 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check), | ||
| 258 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check), | ||
| 259 | VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma), | ||
| 260 | VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid), | ||
| 261 | VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid), | ||
| 262 | VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check), | ||
| 263 | VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check), | ||
| 264 | VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check), | ||
| 265 | VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET, | ||
| 266 | &vmw_cmd_set_render_target_check), | ||
| 267 | VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_cid_check), | ||
| 268 | VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check), | ||
| 269 | VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check), | ||
| 270 | VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check), | ||
| 271 | VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check), | ||
| 272 | VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check), | ||
| 273 | VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check), | ||
| 274 | VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check), | ||
| 275 | VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check), | ||
| 276 | VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check), | ||
| 277 | VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_cid_check), | ||
| 278 | VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check), | ||
| 279 | VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_cid_check), | ||
| 280 | VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check), | ||
| 281 | VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check), | ||
| 282 | VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_cid_check), | ||
| 283 | VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_cid_check), | ||
| 284 | VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok), | ||
| 285 | VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN, | ||
| 286 | &vmw_cmd_blt_surf_screen_check) | ||
| 287 | }; | ||
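
For readers unfamiliar with the pattern, VMW_CMD_DEF builds a C99 designated initializer, so the table is indexed by command id relative to SVGA_3D_CMD_BASE. For example:

	/* VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check)
	 * expands to
	 *   [SVGA_3D_CMD_SURFACE_COPY - SVGA_3D_CMD_BASE] =
	 *		&vmw_cmd_surface_copy_check,
	 * which is what lets vmw_cmd_check() below dispatch with a plain
	 * array lookup after subtracting SVGA_3D_CMD_BASE from the id. */
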
| 288 | |||
| 289 | static int vmw_cmd_check(struct vmw_private *dev_priv, | ||
| 290 | struct vmw_sw_context *sw_context, | ||
| 291 | void *buf, uint32_t *size) | ||
| 292 | { | ||
| 293 | uint32_t cmd_id; | ||
| 294 | SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf; | ||
| 295 | int ret; | ||
| 296 | |||
| 297 | cmd_id = ((uint32_t *)buf)[0]; | ||
| 298 | if (cmd_id == SVGA_CMD_UPDATE) { | ||
| 299 | *size = 5 << 2; | ||
| 300 | return 0; | ||
| 301 | } | ||
| 302 | |||
| 303 | cmd_id = le32_to_cpu(header->id); | ||
| 304 | *size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader); | ||
| 305 | |||
| 306 | cmd_id -= SVGA_3D_CMD_BASE; | ||
| 307 | if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE)) | ||
| 308 | goto out_err; | ||
| 309 | |||
| 310 | ret = vmw_cmd_funcs[cmd_id](dev_priv, sw_context, header); | ||
| 311 | if (unlikely(ret != 0)) | ||
| 312 | goto out_err; | ||
| 313 | |||
| 314 | return 0; | ||
| 315 | out_err: | ||
| 316 | DRM_ERROR("Illegal / Invalid SVGA3D command: %d\n", | ||
| 317 | cmd_id + SVGA_3D_CMD_BASE); | ||
| 318 | return -EINVAL; | ||
| 319 | } | ||
| 320 | |||
| 321 | static int vmw_cmd_check_all(struct vmw_private *dev_priv, | ||
| 322 | struct vmw_sw_context *sw_context, | ||
| 323 | void *buf, uint32_t size) | ||
| 324 | { | ||
| 325 | int32_t cur_size = size; | ||
| 326 | int ret; | ||
| 327 | |||
| 328 | while (cur_size > 0) { | ||
| 329 | ret = vmw_cmd_check(dev_priv, sw_context, buf, &size); | ||
| 330 | if (unlikely(ret != 0)) | ||
| 331 | return ret; | ||
| 332 | buf = (void *)((unsigned long) buf + size); | ||
| 333 | cur_size -= size; | ||
| 334 | } | ||
| 335 | |||
| 336 | if (unlikely(cur_size != 0)) { | ||
| 337 | DRM_ERROR("Command verifier out of sync.\n"); | ||
| 338 | return -EINVAL; | ||
| 339 | } | ||
| 340 | |||
| 341 | return 0; | ||
| 342 | } | ||
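
As a sketch of the stream these two functions walk (sizes taken from the checks above):

	/* Each packet is self-describing:
	 *   SVGA_CMD_UPDATE: five u32s (id, x, y, w, h),
	 *		      fixed size 5 << 2 = 20 bytes;
	 *   3D packets:      SVGA3dCmdHeader { id, size } followed by
	 *		      "size" payload bytes, so the verifier
	 *		      advances sizeof(SVGA3dCmdHeader) + size.
	 * vmw_cmd_check_all() steps packet by packet and insists that
	 * cur_size land on exactly zero. */
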
| 343 | |||
| 344 | static void vmw_free_relocations(struct vmw_sw_context *sw_context) | ||
| 345 | { | ||
| 346 | sw_context->cur_reloc = 0; | ||
| 347 | } | ||
| 348 | |||
| 349 | static void vmw_apply_relocations(struct vmw_sw_context *sw_context) | ||
| 350 | { | ||
| 351 | uint32_t i; | ||
| 352 | struct vmw_relocation *reloc; | ||
| 353 | struct ttm_validate_buffer *validate; | ||
| 354 | struct ttm_buffer_object *bo; | ||
| 355 | |||
| 356 | for (i = 0; i < sw_context->cur_reloc; ++i) { | ||
| 357 | reloc = &sw_context->relocs[i]; | ||
| 358 | validate = &sw_context->val_bufs[reloc->index]; | ||
| 359 | bo = validate->bo; | ||
| 360 | reloc->location->offset += bo->offset; | ||
| 361 | reloc->location->gmrId = vmw_dmabuf_gmr(bo); | ||
| 362 | } | ||
| 363 | vmw_free_relocations(sw_context); | ||
| 364 | } | ||
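
A worked sketch of the patching above; all ids and offsets here are invented for illustration:

	/* User space wrote
	 *     ptr.gmrId = <dmabuf handle>,  ptr.offset = 0x40
	 * and validation bound the buffer to GMR id 3 at bo->offset 0
	 * (assumed here); the packet the device reads becomes
	 *     ptr.gmrId = 3,  ptr.offset = 0x40
	 * so user handles never reach the hardware -- only real GMR ids
	 * and device-visible offsets do. */
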
| 365 | |||
| 366 | static void vmw_clear_validations(struct vmw_sw_context *sw_context) | ||
| 367 | { | ||
| 368 | struct ttm_validate_buffer *entry, *next; | ||
| 369 | |||
| 370 | list_for_each_entry_safe(entry, next, &sw_context->validate_nodes, | ||
| 371 | head) { | ||
| 372 | list_del(&entry->head); | ||
| 373 | vmw_dmabuf_validate_clear(entry->bo); | ||
| 374 | ttm_bo_unref(&entry->bo); | ||
| 375 | sw_context->cur_val_buf--; | ||
| 376 | } | ||
| 377 | BUG_ON(sw_context->cur_val_buf != 0); | ||
| 378 | } | ||
| 379 | |||
| 380 | static int vmw_validate_single_buffer(struct vmw_private *dev_priv, | ||
| 381 | struct ttm_buffer_object *bo) | ||
| 382 | { | ||
| 383 | int ret; | ||
| 384 | |||
| 385 | if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL) | ||
| 386 | return 0; | ||
| 387 | |||
| 388 | ret = vmw_gmr_bind(dev_priv, bo); | ||
| 389 | if (likely(ret == 0 || ret == -ERESTART)) | ||
| 390 | return ret; | ||
| 391 | |||
| 392 | |||
| 393 | ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false); | ||
| 394 | return ret; | ||
| 395 | } | ||
| 396 | |||
| 397 | |||
| 398 | static int vmw_validate_buffers(struct vmw_private *dev_priv, | ||
| 399 | struct vmw_sw_context *sw_context) | ||
| 400 | { | ||
| 401 | struct ttm_validate_buffer *entry; | ||
| 402 | int ret; | ||
| 403 | |||
| 404 | list_for_each_entry(entry, &sw_context->validate_nodes, head) { | ||
| 405 | ret = vmw_validate_single_buffer(dev_priv, entry->bo); | ||
| 406 | if (unlikely(ret != 0)) | ||
| 407 | return ret; | ||
| 408 | } | ||
| 409 | return 0; | ||
| 410 | } | ||
| 411 | |||
| 412 | int vmw_execbuf_ioctl(struct drm_device *dev, void *data, | ||
| 413 | struct drm_file *file_priv) | ||
| 414 | { | ||
| 415 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
| 416 | struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data; | ||
| 417 | struct drm_vmw_fence_rep fence_rep; | ||
| 418 | struct drm_vmw_fence_rep __user *user_fence_rep; | ||
| 419 | int ret; | ||
| 420 | void __user *user_cmd; | ||
| 421 | void *cmd; | ||
| 422 | uint32_t sequence; | ||
| 423 | struct vmw_sw_context *sw_context = &dev_priv->ctx; | ||
| 424 | struct vmw_master *vmaster = vmw_master(file_priv->master); | ||
| 425 | |||
| 426 | ret = ttm_read_lock(&vmaster->lock, true); | ||
| 427 | if (unlikely(ret != 0)) | ||
| 428 | return ret; | ||
| 429 | |||
| 430 | ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex); | ||
| 431 | if (unlikely(ret != 0)) { | ||
| 432 | ret = -ERESTART; | ||
| 433 | goto out_no_cmd_mutex; | ||
| 434 | } | ||
| 435 | |||
| 436 | cmd = vmw_fifo_reserve(dev_priv, arg->command_size); | ||
| 437 | if (unlikely(cmd == NULL)) { | ||
| 438 | DRM_ERROR("Failed reserving fifo space for commands.\n"); | ||
| 439 | ret = -ENOMEM; | ||
| 440 | goto out_unlock; | ||
| 441 | } | ||
| 442 | |||
| 443 | user_cmd = (void __user *)(unsigned long)arg->commands; | ||
| 444 | ret = copy_from_user(cmd, user_cmd, arg->command_size); | ||
| 445 | |||
| 446 | if (unlikely(ret != 0)) { | ||
| 447 | DRM_ERROR("Failed copying commands.\n"); | ||
| 448 | ret = -EFAULT; goto out_commit; | ||
| 449 | } | ||
| 450 | |||
| 451 | sw_context->tfile = vmw_fpriv(file_priv)->tfile; | ||
| 452 | sw_context->cid_valid = false; | ||
| 453 | sw_context->sid_valid = false; | ||
| 454 | sw_context->cur_reloc = 0; | ||
| 455 | sw_context->cur_val_buf = 0; | ||
| 456 | |||
| 457 | INIT_LIST_HEAD(&sw_context->validate_nodes); | ||
| 458 | |||
| 459 | ret = vmw_cmd_check_all(dev_priv, sw_context, cmd, arg->command_size); | ||
| 460 | if (unlikely(ret != 0)) | ||
| 461 | goto out_err; | ||
| 462 | ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes, | ||
| 463 | dev_priv->val_seq++); | ||
| 464 | if (unlikely(ret != 0)) | ||
| 465 | goto out_err; | ||
| 466 | |||
| 467 | ret = vmw_validate_buffers(dev_priv, sw_context); | ||
| 468 | if (unlikely(ret != 0)) | ||
| 469 | goto out_err; | ||
| 470 | |||
| 471 | vmw_apply_relocations(sw_context); | ||
| 472 | vmw_fifo_commit(dev_priv, arg->command_size); | ||
| 473 | |||
| 474 | ret = vmw_fifo_send_fence(dev_priv, &sequence); | ||
| 475 | |||
| 476 | ttm_eu_fence_buffer_objects(&sw_context->validate_nodes, | ||
| 477 | (void *)(unsigned long) sequence); | ||
| 478 | vmw_clear_validations(sw_context); | ||
| 479 | mutex_unlock(&dev_priv->cmdbuf_mutex); | ||
| 480 | |||
| 481 | /* | ||
| 482 | * This error is harmless, because if fence submission fails, | ||
| 483 | * vmw_fifo_send_fence will sync. | ||
| 484 | */ | ||
| 485 | |||
| 486 | if (ret != 0) | ||
| 487 | DRM_ERROR("Fence submission error. Syncing.\n"); | ||
| 488 | |||
| 489 | fence_rep.error = ret; | ||
| 490 | fence_rep.fence_seq = (uint64_t) sequence; | ||
| 491 | |||
| 492 | user_fence_rep = (struct drm_vmw_fence_rep __user *) | ||
| 493 | (unsigned long)arg->fence_rep; | ||
| 494 | |||
| 495 | /* | ||
| 496 | * copy_to_user errors will be detected by user space not | ||
| 497 | * seeing fence_rep::error filled in. | ||
| 498 | */ | ||
| 499 | |||
| 500 | ret = copy_to_user(user_fence_rep, &fence_rep, sizeof(fence_rep)); | ||
| 501 | |||
| 502 | vmw_kms_cursor_post_execbuf(dev_priv); | ||
| 503 | ttm_read_unlock(&vmaster->lock); | ||
| 504 | return 0; | ||
| 505 | out_err: | ||
| 506 | vmw_free_relocations(sw_context); | ||
| 507 | ttm_eu_backoff_reservation(&sw_context->validate_nodes); | ||
| 508 | vmw_clear_validations(sw_context); | ||
| 509 | out_commit: | ||
| 510 | vmw_fifo_commit(dev_priv, 0); | ||
| 511 | out_unlock: | ||
| 512 | mutex_unlock(&dev_priv->cmdbuf_mutex); | ||
| 513 | out_no_cmd_mutex: | ||
| 514 | ttm_read_unlock(&vmaster->lock); | ||
| 515 | return ret; | ||
| 516 | } | ||
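
From user space the flow mirrors this function. A hedged sketch (not part of the patch) using libdrm's drmCommandWrite(); the ioctl index DRM_VMW_EXECBUF and the exact struct layout are assumptions taken from the driver's UAPI header, and error handling is elided:

	struct drm_vmw_fence_rep rep;
	struct drm_vmw_execbuf_arg arg = {
		.commands = (unsigned long) svga3d_cmds,  /* command stream */
		.command_size = cmd_bytes,
		.fence_rep = (unsigned long) &rep,
	};

	if (drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg)) == 0 &&
	    rep.error == 0) {
		/* rep.fence_seq can be handed to the fence-wait ioctl */
	}

Note that fence_rep.error doubling as a success indicator is exactly the contract spelled out in the comments above: a copy_to_user() failure shows up to user space as an unfilled error field.
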
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c new file mode 100644 index 000000000000..641dde76ada1 --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | |||
| @@ -0,0 +1,742 @@ | |||
| 1 | /************************************************************************** | ||
| 2 | * | ||
| 3 | * Copyright © 2007 David Airlie | ||
| 4 | * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA | ||
| 5 | * All Rights Reserved. | ||
| 6 | * | ||
| 7 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 8 | * copy of this software and associated documentation files (the | ||
| 9 | * "Software"), to deal in the Software without restriction, including | ||
| 10 | * without limitation the rights to use, copy, modify, merge, publish, | ||
| 11 | * distribute, sub license, and/or sell copies of the Software, and to | ||
| 12 | * permit persons to whom the Software is furnished to do so, subject to | ||
| 13 | * the following conditions: | ||
| 14 | * | ||
| 15 | * The above copyright notice and this permission notice (including the | ||
| 16 | * next paragraph) shall be included in all copies or substantial portions | ||
| 17 | * of the Software. | ||
| 18 | * | ||
| 19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 20 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 21 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
| 22 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
| 23 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
| 24 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
| 25 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
| 26 | * | ||
| 27 | **************************************************************************/ | ||
| 28 | |||
| 29 | #include "drmP.h" | ||
| 30 | #include "vmwgfx_drv.h" | ||
| 31 | |||
| 32 | #include "ttm/ttm_placement.h" | ||
| 33 | |||
| 34 | #define VMW_DIRTY_DELAY (HZ / 30) | ||
| 35 | |||
| 36 | struct vmw_fb_par { | ||
| 37 | struct vmw_private *vmw_priv; | ||
| 38 | |||
| 39 | void *vmalloc; | ||
| 40 | |||
| 41 | struct vmw_dma_buffer *vmw_bo; | ||
| 42 | struct ttm_bo_kmap_obj map; | ||
| 43 | |||
| 44 | u32 pseudo_palette[17]; | ||
| 45 | |||
| 46 | unsigned depth; | ||
| 47 | unsigned bpp; | ||
| 48 | |||
| 49 | unsigned max_width; | ||
| 50 | unsigned max_height; | ||
| 51 | |||
| 52 | void *bo_ptr; | ||
| 53 | unsigned bo_size; | ||
| 54 | bool bo_iowrite; | ||
| 55 | |||
| 56 | struct { | ||
| 57 | spinlock_t lock; | ||
| 58 | bool active; | ||
| 59 | unsigned x1; | ||
| 60 | unsigned y1; | ||
| 61 | unsigned x2; | ||
| 62 | unsigned y2; | ||
| 63 | } dirty; | ||
| 64 | }; | ||
| 65 | |||
| 66 | static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green, | ||
| 67 | unsigned blue, unsigned transp, | ||
| 68 | struct fb_info *info) | ||
| 69 | { | ||
| 70 | struct vmw_fb_par *par = info->par; | ||
| 71 | u32 *pal = par->pseudo_palette; | ||
| 72 | |||
| 73 | if (regno > 15) { | ||
| 74 | DRM_ERROR("Bad regno %u.\n", regno); | ||
| 75 | return 1; | ||
| 76 | } | ||
| 77 | |||
| 78 | switch (par->depth) { | ||
| 79 | case 24: | ||
| 80 | case 32: | ||
| 81 | pal[regno] = ((red & 0xff00) << 8) | | ||
| 82 | (green & 0xff00) | | ||
| 83 | ((blue & 0xff00) >> 8); | ||
| 84 | break; | ||
| 85 | default: | ||
| 86 | DRM_ERROR("Bad depth %u, bpp %u.\n", par->depth, par->bpp); | ||
| 87 | return 1; | ||
| 88 | } | ||
| 89 | |||
| 90 | return 0; | ||
| 91 | } | ||
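
fbdev hands setcolreg 16-bit color components; the shifts above keep only the high byte of each and pack them as XRGB8888. A worked example with arbitrary values:

	/* red = 0xff00, green = 0x8000, blue = 0x1200:
	 *   ((0xff00 & 0xff00) << 8) = 0x00ff0000
	 *    (0x8000 & 0xff00)       = 0x00008000
	 *   ((0x1200 & 0xff00) >> 8) = 0x00000012
	 * pal[regno] = 0x00ff8012, i.e. R = 0xff, G = 0x80, B = 0x12. */
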
| 92 | |||
| 93 | static int vmw_fb_check_var(struct fb_var_screeninfo *var, | ||
| 94 | struct fb_info *info) | ||
| 95 | { | ||
| 96 | int depth = var->bits_per_pixel; | ||
| 97 | struct vmw_fb_par *par = info->par; | ||
| 98 | struct vmw_private *vmw_priv = par->vmw_priv; | ||
| 99 | |||
| 100 | switch (var->bits_per_pixel) { | ||
| 101 | case 32: | ||
| 102 | depth = (var->transp.length > 0) ? 32 : 24; | ||
| 103 | break; | ||
| 104 | default: | ||
| 105 | DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel); | ||
| 106 | return -EINVAL; | ||
| 107 | } | ||
| 108 | |||
| 109 | switch (depth) { | ||
| 110 | case 24: | ||
| 111 | var->red.offset = 16; | ||
| 112 | var->green.offset = 8; | ||
| 113 | var->blue.offset = 0; | ||
| 114 | var->red.length = 8; | ||
| 115 | var->green.length = 8; | ||
| 116 | var->blue.length = 8; | ||
| 117 | var->transp.length = 0; | ||
| 118 | var->transp.offset = 0; | ||
| 119 | break; | ||
| 120 | case 32: | ||
| 121 | var->red.offset = 16; | ||
| 122 | var->green.offset = 8; | ||
| 123 | var->blue.offset = 0; | ||
| 124 | var->red.length = 8; | ||
| 125 | var->green.length = 8; | ||
| 126 | var->blue.length = 8; | ||
| 127 | var->transp.length = 8; | ||
| 128 | var->transp.offset = 24; | ||
| 129 | break; | ||
| 130 | default: | ||
| 131 | DRM_ERROR("Bad depth %u.\n", depth); | ||
| 132 | return -EINVAL; | ||
| 133 | } | ||
| 134 | |||
| 135 | /* without multimon it's hard to resize */ | ||
| 136 | if (!(vmw_priv->capabilities & SVGA_CAP_MULTIMON) && | ||
| 137 | (var->xres != par->max_width || | ||
| 138 | var->yres != par->max_height)) { | ||
| 139 | DRM_ERROR("Tried to resize, but we don't have multimon\n"); | ||
| 140 | return -EINVAL; | ||
| 141 | } | ||
| 142 | |||
| 143 | if (var->xres > par->max_width || | ||
| 144 | var->yres > par->max_height) { | ||
| 145 | DRM_ERROR("Requested geom can not fit in framebuffer\n"); | ||
| 146 | return -EINVAL; | ||
| 147 | } | ||
| 148 | |||
| 149 | return 0; | ||
| 150 | } | ||
| 151 | |||
| 152 | static int vmw_fb_set_par(struct fb_info *info) | ||
| 153 | { | ||
| 154 | struct vmw_fb_par *par = info->par; | ||
| 155 | struct vmw_private *vmw_priv = par->vmw_priv; | ||
| 156 | |||
| 157 | if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) { | ||
| 158 | vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1); | ||
| 159 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0); | ||
| 160 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true); | ||
| 161 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, 0); | ||
| 162 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, 0); | ||
| 163 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, 0); | ||
| 164 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, 0); | ||
| 165 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); | ||
| 166 | |||
| 167 | vmw_write(vmw_priv, SVGA_REG_ENABLE, 1); | ||
| 168 | vmw_write(vmw_priv, SVGA_REG_WIDTH, par->max_width); | ||
| 169 | vmw_write(vmw_priv, SVGA_REG_HEIGHT, par->max_height); | ||
| 170 | vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, par->bpp); | ||
| 171 | vmw_write(vmw_priv, SVGA_REG_DEPTH, par->depth); | ||
| 172 | vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000); | ||
| 173 | vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00); | ||
| 174 | vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff); | ||
| 175 | |||
| 176 | /* TODO check if pitch and offset changes */ | ||
| 177 | |||
| 178 | vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1); | ||
| 179 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0); | ||
| 180 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true); | ||
| 181 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, info->var.xoffset); | ||
| 182 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, info->var.yoffset); | ||
| 183 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres); | ||
| 184 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres); | ||
| 185 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); | ||
| 186 | } else { | ||
| 187 | vmw_write(vmw_priv, SVGA_REG_WIDTH, info->var.xres); | ||
| 188 | vmw_write(vmw_priv, SVGA_REG_HEIGHT, info->var.yres); | ||
| 189 | |||
| 190 | /* TODO check if pitch and offset changes */ | ||
| 191 | } | ||
| 192 | |||
| 193 | return 0; | ||
| 194 | } | ||
| 195 | |||
| 196 | static int vmw_fb_pan_display(struct fb_var_screeninfo *var, | ||
| 197 | struct fb_info *info) | ||
| 198 | { | ||
| 199 | return 0; | ||
| 200 | } | ||
| 201 | |||
| 202 | static int vmw_fb_blank(int blank, struct fb_info *info) | ||
| 203 | { | ||
| 204 | return 0; | ||
| 205 | } | ||
| 206 | |||
| 207 | /* | ||
| 208 | * Dirty code | ||
| 209 | */ | ||
| 210 | |||
| 211 | static void vmw_fb_dirty_flush(struct vmw_fb_par *par) | ||
| 212 | { | ||
| 213 | struct vmw_private *vmw_priv = par->vmw_priv; | ||
| 214 | struct fb_info *info = vmw_priv->fb_info; | ||
| 215 | int stride = (info->fix.line_length / 4); | ||
| 216 | int *src = (int *)info->screen_base; | ||
| 217 | __le32 __iomem *vram_mem = par->bo_ptr; | ||
| 218 | unsigned long flags; | ||
| 219 | unsigned x, y, w, h; | ||
| 220 | int i, k; | ||
| 221 | struct { | ||
| 222 | uint32_t header; | ||
| 223 | SVGAFifoCmdUpdate body; | ||
| 224 | } *cmd; | ||
| 225 | |||
| 226 | spin_lock_irqsave(&par->dirty.lock, flags); | ||
| 227 | if (!par->dirty.active) { | ||
| 228 | spin_unlock_irqrestore(&par->dirty.lock, flags); | ||
| 229 | return; | ||
| 230 | } | ||
| 231 | x = par->dirty.x1; | ||
| 232 | y = par->dirty.y1; | ||
| 233 | w = min(par->dirty.x2, info->var.xres) - x; | ||
| 234 | h = min(par->dirty.y2, info->var.yres) - y; | ||
| 235 | par->dirty.x1 = par->dirty.x2 = 0; | ||
| 236 | par->dirty.y1 = par->dirty.y2 = 0; | ||
| 237 | spin_unlock_irqrestore(&par->dirty.lock, flags); | ||
| 238 | |||
| 239 | for (i = y * stride; i < info->fix.smem_len / 4; i += stride) { | ||
| 240 | for (k = i+x; k < i+x+w && k < info->fix.smem_len / 4; k++) | ||
| 241 | iowrite32(src[k], vram_mem + k); | ||
| 242 | } | ||
| 243 | |||
| 244 | #if 0 | ||
| 245 | DRM_INFO("%s, (%u, %u) (%ux%u)\n", __func__, x, y, w, h); | ||
| 246 | #endif | ||
| 247 | |||
| 248 | cmd = vmw_fifo_reserve(vmw_priv, sizeof(*cmd)); | ||
| 249 | if (unlikely(cmd == NULL)) { | ||
| 250 | DRM_ERROR("Fifo reserve failed.\n"); | ||
| 251 | return; | ||
| 252 | } | ||
| 253 | |||
| 254 | cmd->header = cpu_to_le32(SVGA_CMD_UPDATE); | ||
| 255 | cmd->body.x = cpu_to_le32(x); | ||
| 256 | cmd->body.y = cpu_to_le32(y); | ||
| 257 | cmd->body.width = cpu_to_le32(w); | ||
| 258 | cmd->body.height = cpu_to_le32(h); | ||
| 259 | vmw_fifo_commit(vmw_priv, sizeof(*cmd)); | ||
| 260 | } | ||
| 261 | |||
| 262 | static void vmw_fb_dirty_mark(struct vmw_fb_par *par, | ||
| 263 | unsigned x1, unsigned y1, | ||
| 264 | unsigned width, unsigned height) | ||
| 265 | { | ||
| 266 | struct fb_info *info = par->vmw_priv->fb_info; | ||
| 267 | unsigned long flags; | ||
| 268 | unsigned x2 = x1 + width; | ||
| 269 | unsigned y2 = y1 + height; | ||
| 270 | |||
| 271 | spin_lock_irqsave(&par->dirty.lock, flags); | ||
| 272 | if (par->dirty.x1 == par->dirty.x2) { | ||
| 273 | par->dirty.x1 = x1; | ||
| 274 | par->dirty.y1 = y1; | ||
| 275 | par->dirty.x2 = x2; | ||
| 276 | par->dirty.y2 = y2; | ||
| 277 | /* If we are active, start the dirty work; | ||
| 278 | * we share the work with the defio system. */ | ||
| 279 | if (par->dirty.active) | ||
| 280 | schedule_delayed_work(&info->deferred_work, VMW_DIRTY_DELAY); | ||
| 281 | } else { | ||
| 282 | if (x1 < par->dirty.x1) | ||
| 283 | par->dirty.x1 = x1; | ||
| 284 | if (y1 < par->dirty.y1) | ||
| 285 | par->dirty.y1 = y1; | ||
| 286 | if (x2 > par->dirty.x2) | ||
| 287 | par->dirty.x2 = x2; | ||
| 288 | if (y2 > par->dirty.y2) | ||
| 289 | par->dirty.y2 = y2; | ||
| 290 | } | ||
| 291 | spin_unlock_irqrestore(&par->dirty.lock, flags); | ||
| 292 | } | ||
| 293 | |||
| 294 | static void vmw_deferred_io(struct fb_info *info, | ||
| 295 | struct list_head *pagelist) | ||
| 296 | { | ||
| 297 | struct vmw_fb_par *par = info->par; | ||
| 298 | unsigned long start, end, min, max; | ||
| 299 | unsigned long flags; | ||
| 300 | struct page *page; | ||
| 301 | int y1, y2; | ||
| 302 | |||
| 303 | min = ULONG_MAX; | ||
| 304 | max = 0; | ||
| 305 | list_for_each_entry(page, pagelist, lru) { | ||
| 306 | start = page->index << PAGE_SHIFT; | ||
| 307 | end = start + PAGE_SIZE - 1; | ||
| 308 | min = min(min, start); | ||
| 309 | max = max(max, end); | ||
| 310 | } | ||
| 311 | |||
| 312 | if (min < max) { | ||
| 313 | y1 = min / info->fix.line_length; | ||
| 314 | y2 = (max / info->fix.line_length) + 1; | ||
| 315 | |||
| 316 | spin_lock_irqsave(&par->dirty.lock, flags); | ||
| 317 | par->dirty.x1 = 0; | ||
| 318 | par->dirty.y1 = y1; | ||
| 319 | par->dirty.x2 = info->var.xres; | ||
| 320 | par->dirty.y2 = y2; | ||
| 321 | spin_unlock_irqrestore(&par->dirty.lock, flags); | ||
| 322 | } | ||
| 323 | |||
| 324 | vmw_fb_dirty_flush(par); | ||
| 325 | } | ||
| 326 | |||
| 327 | struct fb_deferred_io vmw_defio = { | ||
| 328 | .delay = VMW_DIRTY_DELAY, | ||
| 329 | .deferred_io = vmw_deferred_io, | ||
| 330 | }; | ||
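
How the two dirty paths meet, as a reading aid:

	/* Writes through the fbdev mmap are trapped by the deferred-I/O
	 * machinery, which batches touched pages and calls
	 * vmw_deferred_io() after VMW_DIRTY_DELAY; the fillrect/copyarea/
	 * imageblit ops below call vmw_fb_dirty_mark() directly.  Both
	 * widen the same par->dirty rectangle, and vmw_fb_dirty_flush()
	 * then copies that region from the vmalloc shadow into VRAM and
	 * emits one SVGA_CMD_UPDATE for it. */
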
| 331 | |||
| 332 | /* | ||
| 333 | * Draw code | ||
| 334 | */ | ||
| 335 | |||
| 336 | static void vmw_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect) | ||
| 337 | { | ||
| 338 | cfb_fillrect(info, rect); | ||
| 339 | vmw_fb_dirty_mark(info->par, rect->dx, rect->dy, | ||
| 340 | rect->width, rect->height); | ||
| 341 | } | ||
| 342 | |||
| 343 | static void vmw_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region) | ||
| 344 | { | ||
| 345 | cfb_copyarea(info, region); | ||
| 346 | vmw_fb_dirty_mark(info->par, region->dx, region->dy, | ||
| 347 | region->width, region->height); | ||
| 348 | } | ||
| 349 | |||
| 350 | static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image) | ||
| 351 | { | ||
| 352 | cfb_imageblit(info, image); | ||
| 353 | vmw_fb_dirty_mark(info->par, image->dx, image->dy, | ||
| 354 | image->width, image->height); | ||
| 355 | } | ||
| 356 | |||
| 357 | /* | ||
| 358 | * Bring up code | ||
| 359 | */ | ||
| 360 | |||
| 361 | static struct fb_ops vmw_fb_ops = { | ||
| 362 | .owner = THIS_MODULE, | ||
| 363 | .fb_check_var = vmw_fb_check_var, | ||
| 364 | .fb_set_par = vmw_fb_set_par, | ||
| 365 | .fb_setcolreg = vmw_fb_setcolreg, | ||
| 366 | .fb_fillrect = vmw_fb_fillrect, | ||
| 367 | .fb_copyarea = vmw_fb_copyarea, | ||
| 368 | .fb_imageblit = vmw_fb_imageblit, | ||
| 369 | .fb_pan_display = vmw_fb_pan_display, | ||
| 370 | .fb_blank = vmw_fb_blank, | ||
| 371 | }; | ||
| 372 | |||
| 373 | static int vmw_fb_create_bo(struct vmw_private *vmw_priv, | ||
| 374 | size_t size, struct vmw_dma_buffer **out) | ||
| 375 | { | ||
| 376 | struct vmw_dma_buffer *vmw_bo; | ||
| 377 | struct ttm_placement ne_placement = vmw_vram_ne_placement; | ||
| 378 | int ret; | ||
| 379 | |||
| 380 | ne_placement.lpfn = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; | ||
| 381 | |||
| 382 | /* interruptible? */ | ||
| 383 | ret = ttm_write_lock(&vmw_priv->fbdev_master.lock, false); | ||
| 384 | if (unlikely(ret != 0)) | ||
| 385 | return ret; | ||
| 386 | |||
| 387 | vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL); | ||
| 388 | if (!vmw_bo) { | ||
| 389 | ret = -ENOMEM; goto err_unlock; } | ||
| 390 | |||
| 391 | ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size, | ||
| 392 | &ne_placement, | ||
| 393 | false, | ||
| 394 | &vmw_dmabuf_bo_free); | ||
| 395 | if (unlikely(ret != 0)) | ||
| 396 | goto err_unlock; /* init frees the buffer on failure */ | ||
| 397 | |||
| 398 | *out = vmw_bo; | ||
| 399 | |||
| 400 | ttm_write_unlock(&vmw_priv->fbdev_master.lock); | ||
| 401 | |||
| 402 | return 0; | ||
| 403 | |||
| 404 | err_unlock: | ||
| 405 | ttm_write_unlock(&vmw_priv->fbdev_master.lock); | ||
| 406 | return ret; | ||
| 407 | } | ||
| 408 | |||
| 409 | int vmw_fb_init(struct vmw_private *vmw_priv) | ||
| 410 | { | ||
| 411 | struct device *device = &vmw_priv->dev->pdev->dev; | ||
| 412 | struct vmw_fb_par *par; | ||
| 413 | struct fb_info *info; | ||
| 414 | unsigned initial_width, initial_height; | ||
| 415 | unsigned fb_width, fb_height; | ||
| 416 | unsigned fb_bpp, fb_depth, fb_offset, fb_pitch, fb_size; | ||
| 417 | int ret; | ||
| 418 | |||
| 419 | initial_width = 800; | ||
| 420 | initial_height = 600; | ||
| 421 | |||
| 422 | fb_bpp = 32; | ||
| 423 | fb_depth = 24; | ||
| 424 | |||
| 425 | if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) { | ||
| 426 | fb_width = min(vmw_priv->fb_max_width, (unsigned)2048); | ||
| 427 | fb_height = min(vmw_priv->fb_max_height, (unsigned)2048); | ||
| 428 | } else { | ||
| 429 | fb_width = min(vmw_priv->fb_max_width, initial_width); | ||
| 430 | fb_height = min(vmw_priv->fb_max_height, initial_height); | ||
| 431 | } | ||
| 432 | |||
| 433 | initial_width = min(fb_width, initial_width); | ||
| 434 | initial_height = min(fb_height, initial_height); | ||
| 435 | |||
| 436 | vmw_write(vmw_priv, SVGA_REG_WIDTH, fb_width); | ||
| 437 | vmw_write(vmw_priv, SVGA_REG_HEIGHT, fb_height); | ||
| 438 | vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, fb_bpp); | ||
| 439 | vmw_write(vmw_priv, SVGA_REG_DEPTH, fb_depth); | ||
| 440 | vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000); | ||
| 441 | vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00); | ||
| 442 | vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff); | ||
| 443 | |||
| 444 | fb_size = vmw_read(vmw_priv, SVGA_REG_FB_SIZE); | ||
| 445 | fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET); | ||
| 446 | fb_pitch = vmw_read(vmw_priv, SVGA_REG_BYTES_PER_LINE); | ||
| 447 | |||
| 448 | DRM_DEBUG("width %u\n", vmw_read(vmw_priv, SVGA_REG_MAX_WIDTH)); | ||
| 449 | DRM_DEBUG("height %u\n", vmw_read(vmw_priv, SVGA_REG_MAX_HEIGHT)); | ||
| 450 | DRM_DEBUG("width %u\n", vmw_read(vmw_priv, SVGA_REG_WIDTH)); | ||
| 451 | DRM_DEBUG("height %u\n", vmw_read(vmw_priv, SVGA_REG_HEIGHT)); | ||
| 452 | DRM_DEBUG("bpp %u\n", vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL)); | ||
| 453 | DRM_DEBUG("depth %u\n", vmw_read(vmw_priv, SVGA_REG_DEPTH)); | ||
| 454 | DRM_DEBUG("bpl %u\n", vmw_read(vmw_priv, SVGA_REG_BYTES_PER_LINE)); | ||
| 455 | DRM_DEBUG("r mask %08x\n", vmw_read(vmw_priv, SVGA_REG_RED_MASK)); | ||
| 456 | DRM_DEBUG("g mask %08x\n", vmw_read(vmw_priv, SVGA_REG_GREEN_MASK)); | ||
| 457 | DRM_DEBUG("b mask %08x\n", vmw_read(vmw_priv, SVGA_REG_BLUE_MASK)); | ||
| 458 | DRM_DEBUG("fb_offset 0x%08x\n", fb_offset); | ||
| 459 | DRM_DEBUG("fb_pitch %u\n", fb_pitch); | ||
| 460 | DRM_DEBUG("fb_size %u kiB\n", fb_size / 1024); | ||
| 461 | |||
| 462 | info = framebuffer_alloc(sizeof(*par), device); | ||
| 463 | if (!info) | ||
| 464 | return -ENOMEM; | ||
| 465 | |||
| 466 | /* | ||
| 467 | * Par | ||
| 468 | */ | ||
| 469 | vmw_priv->fb_info = info; | ||
| 470 | par = info->par; | ||
| 471 | par->vmw_priv = vmw_priv; | ||
| 472 | par->depth = fb_depth; | ||
| 473 | par->bpp = fb_bpp; | ||
| 474 | par->vmalloc = NULL; | ||
| 475 | par->max_width = fb_width; | ||
| 476 | par->max_height = fb_height; | ||
| 477 | |||
| 478 | /* | ||
| 479 | * Create buffers and alloc memory | ||
| 480 | */ | ||
| 481 | par->vmalloc = vmalloc(fb_size); | ||
| 482 | if (unlikely(par->vmalloc == NULL)) { | ||
| 483 | ret = -ENOMEM; | ||
| 484 | goto err_free; | ||
| 485 | } | ||
| 486 | |||
| 487 | ret = vmw_fb_create_bo(vmw_priv, fb_size, &par->vmw_bo); | ||
| 488 | if (unlikely(ret != 0)) | ||
| 489 | goto err_free; | ||
| 490 | |||
| 491 | ret = ttm_bo_kmap(&par->vmw_bo->base, | ||
| 492 | 0, | ||
| 493 | par->vmw_bo->base.num_pages, | ||
| 494 | &par->map); | ||
| 495 | if (unlikely(ret != 0)) | ||
| 496 | goto err_unref; | ||
| 497 | par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite); | ||
| 498 | par->bo_size = fb_size; | ||
| 499 | |||
| 500 | /* | ||
| 501 | * Fixed and var | ||
| 502 | */ | ||
| 503 | strcpy(info->fix.id, "svgadrmfb"); | ||
| 504 | info->fix.type = FB_TYPE_PACKED_PIXELS; | ||
| 505 | info->fix.visual = FB_VISUAL_TRUECOLOR; | ||
| 506 | info->fix.type_aux = 0; | ||
| 507 | info->fix.xpanstep = 1; /* doing it in hw */ | ||
| 508 | info->fix.ypanstep = 1; /* doing it in hw */ | ||
| 509 | info->fix.ywrapstep = 0; | ||
| 510 | info->fix.accel = FB_ACCEL_NONE; | ||
| 511 | info->fix.line_length = fb_pitch; | ||
| 512 | |||
| 513 | info->fix.smem_start = 0; | ||
| 514 | info->fix.smem_len = fb_size; | ||
| 515 | |||
| 516 | info->fix.mmio_start = 0; | ||
| 517 | info->fix.mmio_len = 0; | ||
| 518 | |||
| 519 | info->pseudo_palette = par->pseudo_palette; | ||
| 520 | info->screen_base = par->vmalloc; | ||
| 521 | info->screen_size = fb_size; | ||
| 522 | |||
| 523 | info->flags = FBINFO_DEFAULT; | ||
| 524 | info->fbops = &vmw_fb_ops; | ||
| 525 | |||
| 526 | /* depth 24 by default */ | ||
| 527 | info->var.red.offset = 16; | ||
| 528 | info->var.green.offset = 8; | ||
| 529 | info->var.blue.offset = 0; | ||
| 530 | info->var.red.length = 8; | ||
| 531 | info->var.green.length = 8; | ||
| 532 | info->var.blue.length = 8; | ||
| 533 | info->var.transp.offset = 0; | ||
| 534 | info->var.transp.length = 0; | ||
| 535 | |||
| 536 | info->var.xres_virtual = fb_width; | ||
| 537 | info->var.yres_virtual = fb_height; | ||
| 538 | info->var.bits_per_pixel = par->bpp; | ||
| 539 | info->var.xoffset = 0; | ||
| 540 | info->var.yoffset = 0; | ||
| 541 | info->var.activate = FB_ACTIVATE_NOW; | ||
| 542 | info->var.height = -1; | ||
| 543 | info->var.width = -1; | ||
| 544 | |||
| 545 | info->var.xres = initial_width; | ||
| 546 | info->var.yres = initial_height; | ||
| 547 | |||
| 548 | #if 0 | ||
| 549 | info->pixmap.size = 64*1024; | ||
| 550 | info->pixmap.buf_align = 8; | ||
| 551 | info->pixmap.access_align = 32; | ||
| 552 | info->pixmap.flags = FB_PIXMAP_SYSTEM; | ||
| 553 | info->pixmap.scan_align = 1; | ||
| 554 | #else | ||
| 555 | info->pixmap.size = 0; | ||
| 556 | info->pixmap.buf_align = 8; | ||
| 557 | info->pixmap.access_align = 32; | ||
| 558 | info->pixmap.flags = FB_PIXMAP_SYSTEM; | ||
| 559 | info->pixmap.scan_align = 1; | ||
| 560 | #endif | ||
| 561 | |||
| 562 | /* | ||
| 563 | * Dirty & Deferred IO | ||
| 564 | */ | ||
| 565 | par->dirty.x1 = par->dirty.x2 = 0; | ||
| 566 | par->dirty.y1 = par->dirty.y2 = 0; | ||
| 567 | par->dirty.active = true; | ||
| 568 | spin_lock_init(&par->dirty.lock); | ||
| 569 | info->fbdefio = &vmw_defio; | ||
| 570 | fb_deferred_io_init(info); | ||
| 571 | |||
| 572 | ret = register_framebuffer(info); | ||
| 573 | if (unlikely(ret != 0)) | ||
| 574 | goto err_defio; | ||
| 575 | |||
| 576 | return 0; | ||
| 577 | |||
| 578 | err_defio: | ||
| 579 | fb_deferred_io_cleanup(info); | ||
| 580 | ttm_bo_kunmap(&par->map); | ||
| 581 | err_unref: | ||
| 582 | ttm_bo_unref((struct ttm_buffer_object **)&par->vmw_bo); | ||
| 583 | err_free: | ||
| 584 | vfree(par->vmalloc); | ||
| 585 | framebuffer_release(info); | ||
| 586 | vmw_priv->fb_info = NULL; | ||
| 587 | |||
| 588 | return ret; | ||
| 589 | } | ||
| 590 | |||
| 591 | int vmw_fb_close(struct vmw_private *vmw_priv) | ||
| 592 | { | ||
| 593 | struct fb_info *info; | ||
| 594 | struct vmw_fb_par *par; | ||
| 595 | struct ttm_buffer_object *bo; | ||
| 596 | |||
| 597 | if (!vmw_priv->fb_info) | ||
| 598 | return 0; | ||
| 599 | |||
| 600 | info = vmw_priv->fb_info; | ||
| 601 | par = info->par; | ||
| 602 | bo = &par->vmw_bo->base; | ||
| 603 | par->vmw_bo = NULL; | ||
| 604 | |||
| 605 | /* FIXME: order of the teardown below? */ | ||
| 606 | fb_deferred_io_cleanup(info); | ||
| 607 | unregister_framebuffer(info); | ||
| 608 | |||
| 609 | ttm_bo_kunmap(&par->map); | ||
| 610 | ttm_bo_unref(&bo); | ||
| 611 | |||
| 612 | vfree(par->vmalloc); | ||
| 613 | framebuffer_release(info); | ||
| 614 | |||
| 615 | return 0; | ||
| 616 | } | ||
| 617 | |||
| 618 | int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv, | ||
| 619 | struct vmw_dma_buffer *vmw_bo) | ||
| 620 | { | ||
| 621 | struct ttm_buffer_object *bo = &vmw_bo->base; | ||
| 622 | int ret = 0; | ||
| 623 | |||
| 624 | ret = ttm_bo_reserve(bo, false, false, false, 0); | ||
| 625 | if (unlikely(ret != 0)) | ||
| 626 | return ret; | ||
| 627 | |||
| 628 | ret = ttm_bo_validate(bo, &vmw_sys_placement, false, false); | ||
| 629 | ttm_bo_unreserve(bo); | ||
| 630 | |||
| 631 | return ret; | ||
| 632 | } | ||
| 633 | |||
| 634 | int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv, | ||
| 635 | struct vmw_dma_buffer *vmw_bo) | ||
| 636 | { | ||
| 637 | struct ttm_buffer_object *bo = &vmw_bo->base; | ||
| 638 | struct ttm_placement ne_placement = vmw_vram_ne_placement; | ||
| 639 | int ret = 0; | ||
| 640 | |||
| 641 | ne_placement.lpfn = bo->num_pages; | ||
| 642 | |||
| 643 | /* interruptible? */ | ||
| 644 | ret = ttm_write_lock(&vmw_priv->active_master->lock, false); | ||
| 645 | if (unlikely(ret != 0)) | ||
| 646 | return ret; | ||
| 647 | |||
| 648 | ret = ttm_bo_reserve(bo, false, false, false, 0); | ||
| 649 | if (unlikely(ret != 0)) | ||
| 650 | goto err_unlock; | ||
| 651 | |||
| 652 | if (vmw_bo->gmr_bound) { | ||
| 653 | vmw_gmr_unbind(vmw_priv, vmw_bo->gmr_id); | ||
| 654 | spin_lock(&bo->glob->lru_lock); | ||
| 655 | ida_remove(&vmw_priv->gmr_ida, vmw_bo->gmr_id); | ||
| 656 | spin_unlock(&bo->glob->lru_lock); | ||
| 657 | vmw_bo->gmr_bound = false; | ||
| 658 | } | ||
| 659 | |||
| 660 | ret = ttm_bo_validate(bo, &ne_placement, false, false); | ||
| 661 | ttm_bo_unreserve(bo); | ||
| 662 | err_unlock: | ||
| 663 | ttm_write_unlock(&vmw_priv->active_master->lock); | ||
| 664 | |||
| 665 | return ret; | ||
| 666 | } | ||
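
Why setting lpfn pins the buffer at the start of VRAM, assuming the usual TTM placement semantics:

	/* TTM must place the buffer so that it starts at or after fpfn
	 * and ends at or below lpfn.  With lpfn = bo->num_pages the only
	 * VRAM interval with room for num_pages pages is [0, num_pages),
	 * so a successful validate leaves the buffer at offset 0 --
	 * where the virtual device scans the framebuffer out from. */
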
| 667 | |||
| 668 | int vmw_fb_off(struct vmw_private *vmw_priv) | ||
| 669 | { | ||
| 670 | struct fb_info *info; | ||
| 671 | struct vmw_fb_par *par; | ||
| 672 | unsigned long flags; | ||
| 673 | |||
| 674 | if (!vmw_priv->fb_info) | ||
| 675 | return -EINVAL; | ||
| 676 | |||
| 677 | info = vmw_priv->fb_info; | ||
| 678 | par = info->par; | ||
| 679 | |||
| 680 | spin_lock_irqsave(&par->dirty.lock, flags); | ||
| 681 | par->dirty.active = false; | ||
| 682 | spin_unlock_irqrestore(&par->dirty.lock, flags); | ||
| 683 | |||
| 684 | flush_scheduled_work(); | ||
| 685 | |||
| 686 | par->bo_ptr = NULL; | ||
| 687 | ttm_bo_kunmap(&par->map); | ||
| 688 | |||
| 689 | vmw_dmabuf_from_vram(vmw_priv, par->vmw_bo); | ||
| 690 | |||
| 691 | return 0; | ||
| 692 | } | ||
| 693 | |||
| 694 | int vmw_fb_on(struct vmw_private *vmw_priv) | ||
| 695 | { | ||
| 696 | struct fb_info *info; | ||
| 697 | struct vmw_fb_par *par; | ||
| 698 | unsigned long flags; | ||
| 699 | bool dummy; | ||
| 700 | int ret; | ||
| 701 | |||
| 702 | if (!vmw_priv->fb_info) | ||
| 703 | return -EINVAL; | ||
| 704 | |||
| 705 | info = vmw_priv->fb_info; | ||
| 706 | par = info->par; | ||
| 707 | |||
| 708 | /* we are already active */ | ||
| 709 | if (par->bo_ptr != NULL) | ||
| 710 | return 0; | ||
| 711 | |||
| 712 | /* Make sure that all overlays are stopped when we take over */ | ||
| 713 | vmw_overlay_stop_all(vmw_priv); | ||
| 714 | |||
| 715 | ret = vmw_dmabuf_to_start_of_vram(vmw_priv, par->vmw_bo); | ||
| 716 | if (unlikely(ret != 0)) { | ||
| 717 | DRM_ERROR("could not move buffer to start of VRAM\n"); | ||
| 718 | goto err_no_buffer; | ||
| 719 | } | ||
| 720 | |||
| 721 | ret = ttm_bo_kmap(&par->vmw_bo->base, | ||
| 722 | 0, | ||
| 723 | par->vmw_bo->base.num_pages, | ||
| 724 | &par->map); | ||
| 725 | BUG_ON(ret != 0); | ||
| 726 | par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &dummy); | ||
| 727 | |||
| 728 | spin_lock_irqsave(&par->dirty.lock, flags); | ||
| 729 | par->dirty.active = true; | ||
| 730 | spin_unlock_irqrestore(&par->dirty.lock, flags); | ||
| 731 | |||
| 732 | err_no_buffer: | ||
| 733 | vmw_fb_set_par(info); | ||
| 734 | |||
| 735 | vmw_fb_dirty_mark(par, 0, 0, info->var.xres, info->var.yres); | ||
| 736 | |||
| 737 | /* If there already was stuff dirty we won't | ||
| 738 | * schedule new work, so let's do it now. */ | ||
| 739 | schedule_delayed_work(&info->deferred_work, 0); | ||
| 740 | |||
| 741 | return 0; | ||
| 742 | } | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c new file mode 100644 index 000000000000..76b0693e2458 --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | |||
| @@ -0,0 +1,521 @@ | |||
| 1 | /************************************************************************** | ||
| 2 | * | ||
| 3 | * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA | ||
| 4 | * All Rights Reserved. | ||
| 5 | * | ||
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 7 | * copy of this software and associated documentation files (the | ||
| 8 | * "Software"), to deal in the Software without restriction, including | ||
| 9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
| 10 | * distribute, sub license, and/or sell copies of the Software, and to | ||
| 11 | * permit persons to whom the Software is furnished to do so, subject to | ||
| 12 | * the following conditions: | ||
| 13 | * | ||
| 14 | * The above copyright notice and this permission notice (including the | ||
| 15 | * next paragraph) shall be included in all copies or substantial portions | ||
| 16 | * of the Software. | ||
| 17 | * | ||
| 18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
| 21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
| 22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
| 23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
| 24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
| 25 | * | ||
| 26 | **************************************************************************/ | ||
| 27 | |||
| 28 | #include "vmwgfx_drv.h" | ||
| 29 | #include "drmP.h" | ||
| 30 | #include "ttm/ttm_placement.h" | ||
| 31 | |||
| 32 | int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) | ||
| 33 | { | ||
| 34 | __le32 __iomem *fifo_mem = dev_priv->mmio_virt; | ||
| 35 | uint32_t max; | ||
| 36 | uint32_t min; | ||
| 37 | uint32_t dummy; | ||
| 38 | int ret; | ||
| 39 | |||
| 40 | fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE; | ||
| 41 | fifo->static_buffer = vmalloc(fifo->static_buffer_size); | ||
| 42 | if (unlikely(fifo->static_buffer == NULL)) | ||
| 43 | return -ENOMEM; | ||
| 44 | |||
| 45 | fifo->last_buffer_size = VMWGFX_FIFO_STATIC_SIZE; | ||
| 46 | fifo->last_data_size = 0; | ||
| 47 | fifo->last_buffer_add = false; | ||
| 48 | fifo->last_buffer = vmalloc(fifo->last_buffer_size); | ||
| 49 | if (unlikely(fifo->last_buffer == NULL)) { | ||
| 50 | ret = -ENOMEM; | ||
| 51 | goto out_err; | ||
| 52 | } | ||
| 53 | |||
| 54 | fifo->dynamic_buffer = NULL; | ||
| 55 | fifo->reserved_size = 0; | ||
| 56 | fifo->using_bounce_buffer = false; | ||
| 57 | |||
| 58 | init_rwsem(&fifo->rwsem); | ||
| 59 | |||
| 60 | /* | ||
| 61 | * Allow mapping the first page read-only to user-space. | ||
| 62 | */ | ||
| 63 | |||
| 64 | DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH)); | ||
| 65 | DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT)); | ||
| 66 | DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL)); | ||
| 67 | |||
| 68 | mutex_lock(&dev_priv->hw_mutex); | ||
| 69 | dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE); | ||
| 70 | dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE); | ||
| 71 | vmw_write(dev_priv, SVGA_REG_ENABLE, 1); | ||
| 72 | |||
| 73 | min = 4; | ||
| 74 | if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO) | ||
| 75 | min = vmw_read(dev_priv, SVGA_REG_MEM_REGS); | ||
| 76 | min <<= 2; | ||
| 77 | |||
| 78 | if (min < PAGE_SIZE) | ||
| 79 | min = PAGE_SIZE; | ||
| 80 | |||
| 81 | iowrite32(min, fifo_mem + SVGA_FIFO_MIN); | ||
| 82 | iowrite32(dev_priv->mmio_size, fifo_mem + SVGA_FIFO_MAX); | ||
| 83 | wmb(); | ||
| 84 | iowrite32(min, fifo_mem + SVGA_FIFO_NEXT_CMD); | ||
| 85 | iowrite32(min, fifo_mem + SVGA_FIFO_STOP); | ||
| 86 | iowrite32(0, fifo_mem + SVGA_FIFO_BUSY); | ||
| 87 | mb(); | ||
| 88 | |||
| 89 | vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1); | ||
| 90 | mutex_unlock(&dev_priv->hw_mutex); | ||
| 91 | |||
| 92 | max = ioread32(fifo_mem + SVGA_FIFO_MAX); | ||
| 93 | min = ioread32(fifo_mem + SVGA_FIFO_MIN); | ||
| 94 | fifo->capabilities = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES); | ||
| 95 | |||
| 96 | DRM_INFO("Fifo max 0x%08x min 0x%08x cap 0x%08x\n", | ||
| 97 | (unsigned int) max, | ||
| 98 | (unsigned int) min, | ||
| 99 | (unsigned int) fifo->capabilities); | ||
| 100 | |||
| 101 | dev_priv->fence_seq = (uint32_t) -100; | ||
| 102 | dev_priv->last_read_sequence = (uint32_t) -100; | ||
| 103 | iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE); | ||
| 104 | |||
| 105 | return vmw_fifo_send_fence(dev_priv, &dummy); | ||
| 106 | out_err: | ||
| 107 | vfree(fifo->static_buffer); | ||
| 108 | fifo->static_buffer = NULL; | ||
| 109 | return ret; | ||
| 110 | } | ||
| 111 | |||
| 112 | void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason) | ||
| 113 | { | ||
| 114 | __le32 __iomem *fifo_mem = dev_priv->mmio_virt; | ||
| 115 | |||
| 116 | mutex_lock(&dev_priv->hw_mutex); | ||
| 117 | |||
| 118 | if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) { | ||
| 119 | iowrite32(1, fifo_mem + SVGA_FIFO_BUSY); | ||
| 120 | vmw_write(dev_priv, SVGA_REG_SYNC, reason); | ||
| 121 | } | ||
| 122 | |||
| 123 | mutex_unlock(&dev_priv->hw_mutex); | ||
| 124 | } | ||
| 125 | |||
| 126 | void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) | ||
| 127 | { | ||
| 128 | __le32 __iomem *fifo_mem = dev_priv->mmio_virt; | ||
| 129 | |||
| 130 | mutex_lock(&dev_priv->hw_mutex); | ||
| 131 | |||
| 132 | while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0) | ||
| 133 | vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC); | ||
| 134 | |||
| 135 | dev_priv->last_read_sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE); | ||
| 136 | |||
| 137 | vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, | ||
| 138 | dev_priv->config_done_state); | ||
| 139 | vmw_write(dev_priv, SVGA_REG_ENABLE, | ||
| 140 | dev_priv->enable_state); | ||
| 141 | |||
| 142 | mutex_unlock(&dev_priv->hw_mutex); | ||
| 143 | |||
| 144 | if (likely(fifo->last_buffer != NULL)) { | ||
| 145 | vfree(fifo->last_buffer); | ||
| 146 | fifo->last_buffer = NULL; | ||
| 147 | } | ||
| 148 | |||
| 149 | if (likely(fifo->static_buffer != NULL)) { | ||
| 150 | vfree(fifo->static_buffer); | ||
| 151 | fifo->static_buffer = NULL; | ||
| 152 | } | ||
| 153 | |||
| 154 | if (likely(fifo->dynamic_buffer != NULL)) { | ||
| 155 | vfree(fifo->dynamic_buffer); | ||
| 156 | fifo->dynamic_buffer = NULL; | ||
| 157 | } | ||
| 158 | } | ||
| 159 | |||
| 160 | static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes) | ||
| 161 | { | ||
| 162 | __le32 __iomem *fifo_mem = dev_priv->mmio_virt; | ||
| 163 | uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX); | ||
| 164 | uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD); | ||
| 165 | uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN); | ||
| 166 | uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP); | ||
| 167 | |||
| 168 | return ((max - next_cmd) + (stop - min) <= bytes); | ||
| 169 | } | ||
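Annotation (not part of the patch): the command FIFO occupies the byte range [min, max) of the mapped FIFO memory; SVGA_FIFO_NEXT_CMD is the driver's write pointer and SVGA_FIFO_STOP is the device's read pointer. Below is a standalone sketch of the canonical two-case free-space computation for such a ring; note that vmw_fifo_is_full() above applies the first branch's sum in both cases.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /*
     * Free bytes in an SVGA-style ring [min, max) with producer head
     * next_cmd and consumer tail stop (illustrative sketch only).
     */
    static uint32_t ring_free(uint32_t min, uint32_t max,
                              uint32_t next_cmd, uint32_t stop)
    {
            if (next_cmd >= stop)            /* data does not wrap */
                    return (max - next_cmd) + (stop - min);
            return stop - next_cmd;          /* data wraps past max */
    }

    int main(void)
    {
            /* hypothetical 64 KiB ring starting one page in */
            uint32_t min = 4096, max = 4096 + 65536;

            assert(ring_free(min, max, min, min) == 65536);   /* empty */
            assert(ring_free(min, max, min + 100, min) == 65436);
            printf("free: %u\n", ring_free(min, max, 8192, 6144));
            return 0;
    }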
| 170 | |||
| 171 | static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv, | ||
| 172 | uint32_t bytes, bool interruptible, | ||
| 173 | unsigned long timeout) | ||
| 174 | { | ||
| 175 | int ret = 0; | ||
| 176 | unsigned long end_jiffies = jiffies + timeout; | ||
| 177 | DEFINE_WAIT(__wait); | ||
| 178 | |||
| 179 | DRM_INFO("Fifo wait noirq.\n"); | ||
| 180 | |||
| 181 | for (;;) { | ||
| 182 | prepare_to_wait(&dev_priv->fifo_queue, &__wait, | ||
| 183 | (interruptible) ? | ||
| 184 | TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE); | ||
| 185 | if (!vmw_fifo_is_full(dev_priv, bytes)) | ||
| 186 | break; | ||
| 187 | if (time_after_eq(jiffies, end_jiffies)) { | ||
| 188 | ret = -EBUSY; | ||
| 189 | DRM_ERROR("SVGA device lockup.\n"); | ||
| 190 | break; | ||
| 191 | } | ||
| 192 | schedule_timeout(1); | ||
| 193 | if (interruptible && signal_pending(current)) { | ||
| 194 | ret = -ERESTART; | ||
| 195 | break; | ||
| 196 | } | ||
| 197 | } | ||
| 198 | finish_wait(&dev_priv->fifo_queue, &__wait); | ||
| 199 | wake_up_all(&dev_priv->fifo_queue); | ||
| 200 | DRM_INFO("Fifo noirq exit.\n"); | ||
| 201 | return ret; | ||
| 202 | } | ||
| 203 | |||
| 204 | static int vmw_fifo_wait(struct vmw_private *dev_priv, | ||
| 205 | uint32_t bytes, bool interruptible, | ||
| 206 | unsigned long timeout) | ||
| 207 | { | ||
| 208 | long ret = 1L; | ||
| 209 | unsigned long irq_flags; | ||
| 210 | |||
| 211 | if (likely(!vmw_fifo_is_full(dev_priv, bytes))) | ||
| 212 | return 0; | ||
| 213 | |||
| 214 | vmw_fifo_ping_host(dev_priv, SVGA_SYNC_FIFOFULL); | ||
| 215 | if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK)) | ||
| 216 | return vmw_fifo_wait_noirq(dev_priv, bytes, | ||
| 217 | interruptible, timeout); | ||
| 218 | |||
| 219 | mutex_lock(&dev_priv->hw_mutex); | ||
| 220 | if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) { | ||
| 221 | spin_lock_irqsave(&dev_priv->irq_lock, irq_flags); | ||
| 222 | outl(SVGA_IRQFLAG_FIFO_PROGRESS, | ||
| 223 | dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); | ||
| 224 | vmw_write(dev_priv, SVGA_REG_IRQMASK, | ||
| 225 | vmw_read(dev_priv, SVGA_REG_IRQMASK) | | ||
| 226 | SVGA_IRQFLAG_FIFO_PROGRESS); | ||
| 227 | spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); | ||
| 228 | } | ||
| 229 | mutex_unlock(&dev_priv->hw_mutex); | ||
| 230 | |||
| 231 | if (interruptible) | ||
| 232 | ret = wait_event_interruptible_timeout | ||
| 233 | (dev_priv->fifo_queue, | ||
| 234 | !vmw_fifo_is_full(dev_priv, bytes), timeout); | ||
| 235 | else | ||
| 236 | ret = wait_event_timeout | ||
| 237 | (dev_priv->fifo_queue, | ||
| 238 | !vmw_fifo_is_full(dev_priv, bytes), timeout); | ||
| 239 | |||
| 240 | if (unlikely(ret == -ERESTARTSYS)) | ||
| 241 | ret = -ERESTART; | ||
| 242 | else if (unlikely(ret == 0)) | ||
| 243 | ret = -EBUSY; | ||
| 244 | else if (likely(ret > 0)) | ||
| 245 | ret = 0; | ||
| 246 | |||
| 247 | mutex_lock(&dev_priv->hw_mutex); | ||
| 248 | if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) { | ||
| 249 | spin_lock_irqsave(&dev_priv->irq_lock, irq_flags); | ||
| 250 | vmw_write(dev_priv, SVGA_REG_IRQMASK, | ||
| 251 | vmw_read(dev_priv, SVGA_REG_IRQMASK) & | ||
| 252 | ~SVGA_IRQFLAG_FIFO_PROGRESS); | ||
| 253 | spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); | ||
| 254 | } | ||
| 255 | mutex_unlock(&dev_priv->hw_mutex); | ||
| 256 | |||
| 257 | return ret; | ||
| 258 | } | ||
| 259 | |||
| 260 | void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes) | ||
| 261 | { | ||
| 262 | struct vmw_fifo_state *fifo_state = &dev_priv->fifo; | ||
| 263 | __le32 __iomem *fifo_mem = dev_priv->mmio_virt; | ||
| 264 | uint32_t max; | ||
| 265 | uint32_t min; | ||
| 266 | uint32_t next_cmd; | ||
| 267 | uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE; | ||
| 268 | int ret; | ||
| 269 | |||
| 270 | down_write(&fifo_state->rwsem); | ||
| 271 | max = ioread32(fifo_mem + SVGA_FIFO_MAX); | ||
| 272 | min = ioread32(fifo_mem + SVGA_FIFO_MIN); | ||
| 273 | next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD); | ||
| 274 | |||
| 275 | if (unlikely(bytes >= (max - min))) | ||
| 276 | goto out_err; | ||
| 277 | |||
| 278 | BUG_ON(fifo_state->reserved_size != 0); | ||
| 279 | BUG_ON(fifo_state->dynamic_buffer != NULL); | ||
| 280 | |||
| 281 | fifo_state->reserved_size = bytes; | ||
| 282 | |||
| 283 | while (1) { | ||
| 284 | uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP); | ||
| 285 | bool need_bounce = false; | ||
| 286 | bool reserve_in_place = false; | ||
| 287 | |||
| 288 | if (next_cmd >= stop) { | ||
| 289 | if (likely((next_cmd + bytes < max || | ||
| 290 | (next_cmd + bytes == max && stop > min)))) | ||
| 291 | reserve_in_place = true; | ||
| 292 | |||
| 293 | else if (vmw_fifo_is_full(dev_priv, bytes)) { | ||
| 294 | ret = vmw_fifo_wait(dev_priv, bytes, | ||
| 295 | false, 3 * HZ); | ||
| 296 | if (unlikely(ret != 0)) | ||
| 297 | goto out_err; | ||
| 298 | } else | ||
| 299 | need_bounce = true; | ||
| 300 | |||
| 301 | } else { | ||
| 302 | |||
| 303 | if (likely((next_cmd + bytes < stop))) | ||
| 304 | reserve_in_place = true; | ||
| 305 | else { | ||
| 306 | ret = vmw_fifo_wait(dev_priv, bytes, | ||
| 307 | false, 3 * HZ); | ||
| 308 | if (unlikely(ret != 0)) | ||
| 309 | goto out_err; | ||
| 310 | } | ||
| 311 | } | ||
| 312 | |||
| 313 | if (reserve_in_place) { | ||
| 314 | if (reserveable || bytes <= sizeof(uint32_t)) { | ||
| 315 | fifo_state->using_bounce_buffer = false; | ||
| 316 | |||
| 317 | if (reserveable) | ||
| 318 | iowrite32(bytes, fifo_mem + | ||
| 319 | SVGA_FIFO_RESERVED); | ||
| 320 | return fifo_mem + (next_cmd >> 2); | ||
| 321 | } else { | ||
| 322 | need_bounce = true; | ||
| 323 | } | ||
| 324 | } | ||
| 325 | |||
| 326 | if (need_bounce) { | ||
| 327 | fifo_state->using_bounce_buffer = true; | ||
| 328 | if (bytes < fifo_state->static_buffer_size) | ||
| 329 | return fifo_state->static_buffer; | ||
| 330 | else { | ||
| 331 | fifo_state->dynamic_buffer = vmalloc(bytes); | ||
| 332 | if (unlikely(fifo_state->dynamic_buffer == NULL)) | ||
| 333 | goto out_err; | ||
| 334 | return fifo_state->dynamic_buffer; | ||
| 335 | } | ||
| 334 | } | ||
| 335 | } | ||
| 336 | out_err: | ||
| 337 | fifo_state->reserved_size = 0; | ||
| 338 | up_write(&fifo_state->rwsem); | ||
| 339 | return NULL; | ||
| 340 | } | ||
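Annotation: a reservation in the loop above ends in one of three ways. If a contiguous stretch is available and the device supports SVGA_FIFO_CAP_RESERVE (or the command is a single dword), the caller writes directly into FIFO memory; if the ring is genuinely full, the code blocks in vmw_fifo_wait(); otherwise the command is staged in a bounce buffer, either the preallocated static one or a one-off vmalloc() for oversized commands. A condensed sketch of that decision, with hypothetical names ('contiguous_fits' stands for the next_cmd/stop geometry checks in the real loop):

    #include <stdint.h>

    /* Hypothetical condensation of the reserve paths (sketch). */
    enum reserve_path { IN_PLACE, BOUNCE_STATIC, BOUNCE_DYNAMIC };

    static enum reserve_path pick_path(int contiguous_fits, int reservable,
                                       uint32_t bytes, uint32_t static_size)
    {
            if (contiguous_fits && (reservable || bytes <= sizeof(uint32_t)))
                    return IN_PLACE;      /* write straight into FIFO memory */
            if (bytes < static_size)
                    return BOUNCE_STATIC; /* preallocated vmalloc'd buffer */
            return BOUNCE_DYNAMIC;        /* one-off vmalloc, large commands */
    }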
| 341 | |||
| 342 | static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state, | ||
| 343 | __le32 __iomem *fifo_mem, | ||
| 344 | uint32_t next_cmd, | ||
| 345 | uint32_t max, uint32_t min, uint32_t bytes) | ||
| 346 | { | ||
| 347 | uint32_t chunk_size = max - next_cmd; | ||
| 348 | uint32_t rest; | ||
| 349 | uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ? | ||
| 350 | fifo_state->dynamic_buffer : fifo_state->static_buffer; | ||
| 351 | |||
| 352 | if (bytes < chunk_size) | ||
| 353 | chunk_size = bytes; | ||
| 354 | |||
| 355 | iowrite32(bytes, fifo_mem + SVGA_FIFO_RESERVED); | ||
| 356 | mb(); | ||
| 357 | memcpy_toio(fifo_mem + (next_cmd >> 2), buffer, chunk_size); | ||
| 358 | rest = bytes - chunk_size; | ||
| 359 | if (rest) | ||
| 360 | memcpy_toio(fifo_mem + (min >> 2), buffer + (chunk_size >> 2), | ||
| 361 | rest); | ||
| 362 | } | ||
| 363 | |||
| 364 | static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state, | ||
| 365 | __le32 __iomem *fifo_mem, | ||
| 366 | uint32_t next_cmd, | ||
| 367 | uint32_t max, uint32_t min, uint32_t bytes) | ||
| 368 | { | ||
| 369 | uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ? | ||
| 370 | fifo_state->dynamic_buffer : fifo_state->static_buffer; | ||
| 371 | |||
| 372 | while (bytes > 0) { | ||
| 373 | iowrite32(*buffer++, fifo_mem + (next_cmd >> 2)); | ||
| 374 | next_cmd += sizeof(uint32_t); | ||
| 375 | if (unlikely(next_cmd == max)) | ||
| 376 | next_cmd = min; | ||
| 377 | mb(); | ||
| 378 | iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD); | ||
| 379 | mb(); | ||
| 380 | bytes -= sizeof(uint32_t); | ||
| 381 | } | ||
| 382 | } | ||
| 383 | |||
| 384 | void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes) | ||
| 385 | { | ||
| 386 | struct vmw_fifo_state *fifo_state = &dev_priv->fifo; | ||
| 387 | __le32 __iomem *fifo_mem = dev_priv->mmio_virt; | ||
| 388 | uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD); | ||
| 389 | uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX); | ||
| 390 | uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN); | ||
| 391 | bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE; | ||
| 392 | |||
| 393 | BUG_ON((bytes & 3) != 0); | ||
| 394 | BUG_ON(bytes > fifo_state->reserved_size); | ||
| 395 | |||
| 396 | fifo_state->reserved_size = 0; | ||
| 397 | |||
| 398 | if (fifo_state->using_bounce_buffer) { | ||
| 399 | if (reserveable) | ||
| 400 | vmw_fifo_res_copy(fifo_state, fifo_mem, | ||
| 401 | next_cmd, max, min, bytes); | ||
| 402 | else | ||
| 403 | vmw_fifo_slow_copy(fifo_state, fifo_mem, | ||
| 404 | next_cmd, max, min, bytes); | ||
| 405 | |||
| 406 | if (fifo_state->dynamic_buffer) { | ||
| 407 | vfree(fifo_state->dynamic_buffer); | ||
| 408 | fifo_state->dynamic_buffer = NULL; | ||
| 409 | } | ||
| 410 | |||
| 411 | } | ||
| 412 | |||
| 413 | if (fifo_state->using_bounce_buffer || reserveable) { | ||
| 414 | next_cmd += bytes; | ||
| 415 | if (next_cmd >= max) | ||
| 416 | next_cmd -= max - min; | ||
| 417 | mb(); | ||
| 418 | iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD); | ||
| 419 | } | ||
| 420 | |||
| 421 | if (reserveable) | ||
| 422 | iowrite32(0, fifo_mem + SVGA_FIFO_RESERVED); | ||
| 423 | mb(); | ||
| 424 | vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC); | ||
| 425 | up_write(&fifo_state->rwsem); | ||
| 426 | } | ||
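Annotation: the reserve/commit pair above is the driver's command submission primitive; vmw_fifo_send_fence() below is one caller. A minimal usage sketch in the same style, emitting the SVGA_CMD_UPDATE command whose emission also appears in vmwgfx_kms.c later in this patch. Kernel-context fragment inside a function; dev_priv and the update rectangle x, y, w, h are assumed in scope, and error handling is abbreviated.

    /* Sketch: emit one command between reserve and commit. */
    struct {
            uint32_t header;
            SVGAFifoCmdUpdate body;
    } *cmd;

    cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
    if (unlikely(cmd == NULL))
            return -ENOMEM;

    cmd->header = cpu_to_le32(SVGA_CMD_UPDATE);
    cmd->body.x = cpu_to_le32(x);
    cmd->body.y = cpu_to_le32(y);
    cmd->body.width = cpu_to_le32(w);
    cmd->body.height = cpu_to_le32(h);
    vmw_fifo_commit(dev_priv, sizeof(*cmd));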
| 427 | |||
| 428 | int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence) | ||
| 429 | { | ||
| 430 | struct vmw_fifo_state *fifo_state = &dev_priv->fifo; | ||
| 431 | struct svga_fifo_cmd_fence *cmd_fence; | ||
| 432 | void *fm; | ||
| 433 | int ret = 0; | ||
| 434 | uint32_t bytes = sizeof(__le32) + sizeof(*cmd_fence); | ||
| 435 | |||
| 436 | fm = vmw_fifo_reserve(dev_priv, bytes); | ||
| 437 | if (unlikely(fm == NULL)) { | ||
| 438 | down_write(&fifo_state->rwsem); | ||
| 439 | *sequence = dev_priv->fence_seq; | ||
| 440 | up_write(&fifo_state->rwsem); | ||
| 441 | ret = -ENOMEM; | ||
| 442 | (void)vmw_fallback_wait(dev_priv, false, true, *sequence, | ||
| 443 | false, 3*HZ); | ||
| 444 | goto out_err; | ||
| 445 | } | ||
| 446 | |||
| 447 | do { | ||
| 448 | *sequence = dev_priv->fence_seq++; | ||
| 449 | } while (*sequence == 0); | ||
| 450 | |||
| 451 | if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) { | ||
| 452 | |||
| 453 | /* | ||
| 454 | * Don't request hardware to send a fence. The | ||
| 455 | * waiting code in vmwgfx_irq.c will emulate this. | ||
| 456 | */ | ||
| 457 | |||
| 458 | vmw_fifo_commit(dev_priv, 0); | ||
| 459 | return 0; | ||
| 460 | } | ||
| 461 | |||
| 462 | *(__le32 *) fm = cpu_to_le32(SVGA_CMD_FENCE); | ||
| 463 | cmd_fence = (struct svga_fifo_cmd_fence *) | ||
| 464 | ((unsigned long)fm + sizeof(__le32)); | ||
| 465 | |||
| 466 | iowrite32(*sequence, &cmd_fence->fence); | ||
| 467 | fifo_state->last_buffer_add = true; | ||
| 468 | vmw_fifo_commit(dev_priv, bytes); | ||
| 469 | fifo_state->last_buffer_add = false; | ||
| 470 | |||
| 471 | out_err: | ||
| 472 | return ret; | ||
| 473 | } | ||
| 474 | |||
| 475 | /** | ||
| 476 | * Map the first page of the FIFO read-only to user-space. | ||
| 477 | */ | ||
| 478 | |||
| 479 | static int vmw_fifo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | ||
| 480 | { | ||
| 481 | int ret; | ||
| 482 | unsigned long address = (unsigned long)vmf->virtual_address; | ||
| 483 | |||
| 484 | if (address != vma->vm_start) | ||
| 485 | return VM_FAULT_SIGBUS; | ||
| 486 | |||
| 487 | ret = vm_insert_pfn(vma, address, vma->vm_pgoff); | ||
| 488 | if (likely(ret == -EBUSY || ret == 0)) | ||
| 489 | return VM_FAULT_NOPAGE; | ||
| 490 | else if (ret == -ENOMEM) | ||
| 491 | return VM_FAULT_OOM; | ||
| 492 | |||
| 493 | return VM_FAULT_SIGBUS; | ||
| 494 | } | ||
| 495 | |||
| 496 | static struct vm_operations_struct vmw_fifo_vm_ops = { | ||
| 497 | .fault = vmw_fifo_vm_fault, | ||
| 498 | .open = NULL, | ||
| 499 | .close = NULL | ||
| 500 | }; | ||
| 501 | |||
| 502 | int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma) | ||
| 503 | { | ||
| 504 | struct drm_file *file_priv; | ||
| 505 | struct vmw_private *dev_priv; | ||
| 506 | |||
| 507 | file_priv = (struct drm_file *)filp->private_data; | ||
| 508 | dev_priv = vmw_priv(file_priv->minor->dev); | ||
| 509 | |||
| 510 | if (vma->vm_pgoff != (dev_priv->mmio_start >> PAGE_SHIFT) || | ||
| 511 | (vma->vm_end - vma->vm_start) != PAGE_SIZE) | ||
| 512 | return -EINVAL; | ||
| 513 | |||
| 514 | vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE); | ||
| 515 | vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_SHARED; | ||
| 516 | vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); | ||
| 517 | vma->vm_page_prot = ttm_io_prot(TTM_PL_FLAG_UNCACHED, | ||
| 518 | vma->vm_page_prot); | ||
| 519 | vma->vm_ops = &vmw_fifo_vm_ops; | ||
| 520 | return 0; | ||
| 521 | } | ||
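Annotation: the mmap hook above only accepts a one-page, effectively read-only mapping at the MMIO offset, and the fault handler refuses any address other than vm_start. A user-space sketch of consuming it; the fifo_offset value would come from the DRM_VMW_PARAM_FIFO_OFFSET query added in vmwgfx_ioctl.c below, and the helper name here is hypothetical.

    #include <stdint.h>
    #include <sys/mman.h>
    #include <unistd.h>

    /* Hypothetical helper: map the first FIFO page read-only. */
    volatile uint32_t *map_fifo_page(int drm_fd, uint64_t fifo_offset)
    {
            /* Must be exactly one page and PROT_READ: the fault handler
             * SIGBUSes other offsets and the mmap hook strips VM_WRITE. */
            void *p = mmap(NULL, getpagesize(), PROT_READ, MAP_SHARED,
                           drm_fd, fifo_offset);
            return (p == MAP_FAILED) ? NULL : (volatile uint32_t *)p;
    }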
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c new file mode 100644 index 000000000000..5f8908a5d7fd --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c | |||
| @@ -0,0 +1,213 @@ | |||
| 1 | /************************************************************************** | ||
| 2 | * | ||
| 3 | * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA | ||
| 4 | * All Rights Reserved. | ||
| 5 | * | ||
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 7 | * copy of this software and associated documentation files (the | ||
| 8 | * "Software"), to deal in the Software without restriction, including | ||
| 9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
| 10 | * distribute, sub license, and/or sell copies of the Software, and to | ||
| 11 | * permit persons to whom the Software is furnished to do so, subject to | ||
| 12 | * the following conditions: | ||
| 13 | * | ||
| 14 | * The above copyright notice and this permission notice (including the | ||
| 15 | * next paragraph) shall be included in all copies or substantial portions | ||
| 16 | * of the Software. | ||
| 17 | * | ||
| 18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
| 21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
| 22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
| 23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
| 24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
| 25 | * | ||
| 26 | **************************************************************************/ | ||
| 27 | |||
| 28 | #include "vmwgfx_drv.h" | ||
| 29 | #include "drmP.h" | ||
| 30 | #include "ttm/ttm_bo_driver.h" | ||
| 31 | |||
| 32 | /** | ||
| 33 | * FIXME: Adjust to the ttm lowmem / highmem storage to minimize | ||
| 34 | * the number of used descriptors. | ||
| 35 | */ | ||
| 36 | |||
| 37 | static int vmw_gmr_build_descriptors(struct list_head *desc_pages, | ||
| 38 | struct page *pages[], | ||
| 39 | unsigned long num_pages) | ||
| 40 | { | ||
| 41 | struct page *page, *next; | ||
| 42 | struct svga_guest_mem_descriptor *page_virtual = NULL; | ||
| 43 | struct svga_guest_mem_descriptor *desc_virtual = NULL; | ||
| 44 | unsigned int desc_per_page; | ||
| 45 | unsigned long prev_pfn; | ||
| 46 | unsigned long pfn; | ||
| 47 | int ret; | ||
| 48 | |||
| 49 | desc_per_page = PAGE_SIZE / | ||
| 50 | sizeof(struct svga_guest_mem_descriptor) - 1; | ||
| 51 | |||
| 52 | while (likely(num_pages != 0)) { | ||
| 53 | page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM); | ||
| 54 | if (unlikely(page == NULL)) { | ||
| 55 | ret = -ENOMEM; | ||
| 56 | goto out_err; | ||
| 57 | } | ||
| 58 | |||
| 59 | list_add_tail(&page->lru, desc_pages); | ||
| 60 | |||
| 61 | /* | ||
| 62 | * Point previous page terminating descriptor to this | ||
| 63 | * page before unmapping it. | ||
| 64 | */ | ||
| 65 | |||
| 66 | if (likely(page_virtual != NULL)) { | ||
| 67 | desc_virtual->ppn = cpu_to_le32(page_to_pfn(page)); | ||
| 68 | kunmap_atomic(page_virtual, KM_USER0); | ||
| 69 | } | ||
| 70 | |||
| 71 | page_virtual = kmap_atomic(page, KM_USER0); | ||
| 72 | desc_virtual = page_virtual - 1; | ||
| 73 | prev_pfn = ~(0UL); | ||
| 74 | |||
| 75 | while (likely(num_pages != 0)) { | ||
| 76 | pfn = page_to_pfn(*pages); | ||
| 77 | |||
| 78 | if (pfn != prev_pfn + 1) { | ||
| 79 | |||
| 80 | if (desc_virtual - page_virtual == | ||
| 81 | desc_per_page - 1) | ||
| 82 | break; | ||
| 83 | |||
| 84 | (++desc_virtual)->ppn = cpu_to_le32(pfn); | ||
| 85 | desc_virtual->num_pages = cpu_to_le32(1); | ||
| 86 | } else { | ||
| 87 | uint32_t tmp = | ||
| 88 | le32_to_cpu(desc_virtual->num_pages); | ||
| 89 | desc_virtual->num_pages = cpu_to_le32(tmp + 1); | ||
| 90 | } | ||
| 91 | prev_pfn = pfn; | ||
| 92 | --num_pages; | ||
| 93 | ++pages; | ||
| 94 | } | ||
| 95 | |||
| 96 | (++desc_virtual)->ppn = cpu_to_le32(0); | ||
| 97 | desc_virtual->num_pages = cpu_to_le32(0); | ||
| 98 | } | ||
| 99 | |||
| 100 | if (likely(page_virtual != NULL)) | ||
| 101 | kunmap_atomic(page_virtual, KM_USER0); | ||
| 102 | |||
| 103 | return 0; | ||
| 104 | out_err: | ||
| 105 | list_for_each_entry_safe(page, next, desc_pages, lru) { | ||
| 106 | list_del_init(&page->lru); | ||
| 107 | __free_page(page); | ||
| 108 | } | ||
| 109 | return ret; | ||
| 110 | } | ||
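Annotation on the list format built above: each descriptor page holds desc_per_page run descriptors plus one reserved slot, and the builder always writes a terminating descriptor with num_pages == 0 at the end of a page. When a further page is allocated, the terminator's ppn is patched to that page's frame number, chaining the pages together; a terminator with ppn == 0 ends the list. Sketched as data (the real struct comes from the SVGA headers; shown here only to illustrate the format):

    /* Run-length descriptor, as used by the builder above (sketch). */
    struct svga_guest_mem_descriptor {
            __le32 ppn;        /* first page frame number of the run */
            __le32 num_pages;  /* run length; 0 marks a terminator   */
    };

    /* Terminator semantics:
     *   { .ppn = <pfn of next descriptor page>, .num_pages = 0 }  chain
     *   { .ppn = 0,                             .num_pages = 0 }  end
     */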
| 111 | |||
| 112 | static inline void vmw_gmr_free_descriptors(struct list_head *desc_pages) | ||
| 113 | { | ||
| 114 | struct page *page, *next; | ||
| 115 | |||
| 116 | list_for_each_entry_safe(page, next, desc_pages, lru) { | ||
| 117 | list_del_init(&page->lru); | ||
| 118 | __free_page(page); | ||
| 119 | } | ||
| 120 | } | ||
| 121 | |||
| 122 | static void vmw_gmr_fire_descriptors(struct vmw_private *dev_priv, | ||
| 123 | int gmr_id, struct list_head *desc_pages) | ||
| 124 | { | ||
| 125 | struct page *page; | ||
| 126 | |||
| 127 | if (unlikely(list_empty(desc_pages))) | ||
| 128 | return; | ||
| 129 | |||
| 130 | page = list_entry(desc_pages->next, struct page, lru); | ||
| 131 | |||
| 132 | mutex_lock(&dev_priv->hw_mutex); | ||
| 133 | |||
| 134 | vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id); | ||
| 135 | wmb(); | ||
| 136 | vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, page_to_pfn(page)); | ||
| 137 | mb(); | ||
| 138 | |||
| 139 | mutex_unlock(&dev_priv->hw_mutex); | ||
| 140 | |||
| 141 | } | ||
| 142 | |||
| 143 | /** | ||
| 144 | * FIXME: Adjust to the ttm lowmem / highmem storage to minimize | ||
| 145 | * the number of used descriptors. | ||
| 146 | */ | ||
| 147 | |||
| 148 | static unsigned long vmw_gmr_count_descriptors(struct page *pages[], | ||
| 149 | unsigned long num_pages) | ||
| 150 | { | ||
| 151 | unsigned long prev_pfn = ~(0UL); | ||
| 152 | unsigned long pfn; | ||
| 153 | unsigned long descriptors = 0; | ||
| 154 | |||
| 155 | while (num_pages--) { | ||
| 156 | pfn = page_to_pfn(*pages++); | ||
| 157 | if (prev_pfn + 1 != pfn) | ||
| 158 | ++descriptors; | ||
| 159 | prev_pfn = pfn; | ||
| 160 | } | ||
| 161 | |||
| 162 | return descriptors; | ||
| 163 | } | ||
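Annotation: this counting pass mirrors the builder's run-length encoding, so the two must agree on what constitutes a run. A standalone model that can be unit-tested in user space:

    #include <assert.h>
    #include <stddef.h>

    /* Standalone model of vmw_gmr_count_descriptors(): one descriptor
     * per maximal run of consecutive page frame numbers. */
    static unsigned long count_runs(const unsigned long *pfns, size_t n)
    {
            unsigned long prev = ~0UL, runs = 0;
            size_t i;

            for (i = 0; i < n; ++i) {
                    if (pfns[i] != prev + 1)
                            ++runs;
                    prev = pfns[i];
            }
            return runs;
    }

    int main(void)
    {
            unsigned long pfns[] = { 10, 11, 12, 40, 41, 7 };

            assert(count_runs(pfns, 6) == 3);  /* {10..12}, {40,41}, {7} */
            return 0;
    }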
| 164 | |||
| 165 | int vmw_gmr_bind(struct vmw_private *dev_priv, | ||
| 166 | struct ttm_buffer_object *bo) | ||
| 167 | { | ||
| 168 | struct ttm_tt *ttm = bo->ttm; | ||
| 169 | unsigned long descriptors; | ||
| 170 | int ret; | ||
| 171 | uint32_t id; | ||
| 172 | struct list_head desc_pages; | ||
| 173 | |||
| 174 | if (!(dev_priv->capabilities & SVGA_CAP_GMR)) | ||
| 175 | return -EINVAL; | ||
| 176 | |||
| 177 | ret = ttm_tt_populate(ttm); | ||
| 178 | if (unlikely(ret != 0)) | ||
| 179 | return ret; | ||
| 180 | |||
| 181 | descriptors = vmw_gmr_count_descriptors(ttm->pages, ttm->num_pages); | ||
| 182 | if (unlikely(descriptors > dev_priv->max_gmr_descriptors)) | ||
| 183 | return -EINVAL; | ||
| 184 | |||
| 185 | INIT_LIST_HEAD(&desc_pages); | ||
| 186 | ret = vmw_gmr_build_descriptors(&desc_pages, ttm->pages, | ||
| 187 | ttm->num_pages); | ||
| 188 | if (unlikely(ret != 0)) | ||
| 189 | return ret; | ||
| 190 | |||
| 191 | ret = vmw_gmr_id_alloc(dev_priv, &id); | ||
| 192 | if (unlikely(ret != 0)) | ||
| 193 | goto out_no_id; | ||
| 194 | |||
| 195 | vmw_gmr_fire_descriptors(dev_priv, id, &desc_pages); | ||
| 196 | vmw_gmr_free_descriptors(&desc_pages); | ||
| 197 | vmw_dmabuf_set_gmr(bo, id); | ||
| 198 | return 0; | ||
| 199 | |||
| 200 | out_no_id: | ||
| 201 | vmw_gmr_free_descriptors(&desc_pages); | ||
| 202 | return ret; | ||
| 203 | } | ||
| 204 | |||
| 205 | void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id) | ||
| 206 | { | ||
| 207 | mutex_lock(&dev_priv->hw_mutex); | ||
| 208 | vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id); | ||
| 209 | wmb(); | ||
| 210 | vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, 0); | ||
| 211 | mb(); | ||
| 212 | mutex_unlock(&dev_priv->hw_mutex); | ||
| 213 | } | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c new file mode 100644 index 000000000000..5fa6a4ed238a --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | |||
| @@ -0,0 +1,81 @@ | |||
| 1 | /************************************************************************** | ||
| 2 | * | ||
| 3 | * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA | ||
| 4 | * All Rights Reserved. | ||
| 5 | * | ||
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 7 | * copy of this software and associated documentation files (the | ||
| 8 | * "Software"), to deal in the Software without restriction, including | ||
| 9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
| 10 | * distribute, sub license, and/or sell copies of the Software, and to | ||
| 11 | * permit persons to whom the Software is furnished to do so, subject to | ||
| 12 | * the following conditions: | ||
| 13 | * | ||
| 14 | * The above copyright notice and this permission notice (including the | ||
| 15 | * next paragraph) shall be included in all copies or substantial portions | ||
| 16 | * of the Software. | ||
| 17 | * | ||
| 18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
| 21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
| 22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
| 23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
| 24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
| 25 | * | ||
| 26 | **************************************************************************/ | ||
| 27 | |||
| 28 | #include "vmwgfx_drv.h" | ||
| 29 | #include "vmwgfx_drm.h" | ||
| 30 | |||
| 31 | int vmw_getparam_ioctl(struct drm_device *dev, void *data, | ||
| 32 | struct drm_file *file_priv) | ||
| 33 | { | ||
| 34 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
| 35 | struct drm_vmw_getparam_arg *param = | ||
| 36 | (struct drm_vmw_getparam_arg *)data; | ||
| 37 | |||
| 38 | switch (param->param) { | ||
| 39 | case DRM_VMW_PARAM_NUM_STREAMS: | ||
| 40 | param->value = vmw_overlay_num_overlays(dev_priv); | ||
| 41 | break; | ||
| 42 | case DRM_VMW_PARAM_NUM_FREE_STREAMS: | ||
| 43 | param->value = vmw_overlay_num_free_overlays(dev_priv); | ||
| 44 | break; | ||
| 45 | case DRM_VMW_PARAM_3D: | ||
| 46 | param->value = dev_priv->capabilities & SVGA_CAP_3D ? 1 : 0; | ||
| 47 | break; | ||
| 48 | case DRM_VMW_PARAM_FIFO_OFFSET: | ||
| 49 | param->value = dev_priv->mmio_start; | ||
| 50 | break; | ||
| 51 | default: | ||
| 52 | DRM_ERROR("Illegal vmwgfx get param request: %d\n", | ||
| 53 | param->param); | ||
| 54 | return -EINVAL; | ||
| 55 | } | ||
| 56 | |||
| 57 | return 0; | ||
| 58 | } | ||
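Annotation: user space exercises this through the driver's getparam ioctl. A sketch, assuming a DRM_IOCTL_VMW_GET_PARAM request number that the uapi header in this series would define; struct drm_vmw_getparam_arg and DRM_VMW_PARAM_3D are taken from vmwgfx_drm.h as used above.

    #include <string.h>
    #include <sys/ioctl.h>

    /* Sketch: query whether the virtual device exposes SVGA_CAP_3D.
     * The ioctl request name here is an assumption. */
    static int vmw_has_3d(int drm_fd)
    {
            struct drm_vmw_getparam_arg arg;

            memset(&arg, 0, sizeof(arg));
            arg.param = DRM_VMW_PARAM_3D;

            if (ioctl(drm_fd, DRM_IOCTL_VMW_GET_PARAM, &arg) != 0)
                    return 0;
            return arg.value != 0;
    }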
| 59 | |||
| 60 | int vmw_fifo_debug_ioctl(struct drm_device *dev, void *data, | ||
| 61 | struct drm_file *file_priv) | ||
| 62 | { | ||
| 63 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
| 64 | struct vmw_fifo_state *fifo_state = &dev_priv->fifo; | ||
| 65 | struct drm_vmw_fifo_debug_arg *arg = | ||
| 66 | (struct drm_vmw_fifo_debug_arg *)data; | ||
| 67 | __le32 __user *buffer = (__le32 __user *) | ||
| 68 | (unsigned long)arg->debug_buffer; | ||
| 69 | |||
| 70 | if (unlikely(fifo_state->last_buffer == NULL)) | ||
| 71 | return -EINVAL; | ||
| 72 | |||
| 73 | if (arg->debug_buffer_size < fifo_state->last_data_size) { | ||
| 74 | arg->used_size = arg->debug_buffer_size; | ||
| 75 | arg->did_not_fit = 1; | ||
| 76 | } else { | ||
| 77 | arg->used_size = fifo_state->last_data_size; | ||
| 78 | arg->did_not_fit = 0; | ||
| 79 | } | ||
| 80 | return copy_to_user(buffer, fifo_state->last_buffer, arg->used_size) ? -EFAULT : 0; | ||
| 81 | } | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c new file mode 100644 index 000000000000..9e0f0306eedb --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c | |||
| @@ -0,0 +1,295 @@ | |||
| 1 | /************************************************************************** | ||
| 2 | * | ||
| 3 | * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA | ||
| 4 | * All Rights Reserved. | ||
| 5 | * | ||
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 7 | * copy of this software and associated documentation files (the | ||
| 8 | * "Software"), to deal in the Software without restriction, including | ||
| 9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
| 10 | * distribute, sub license, and/or sell copies of the Software, and to | ||
| 11 | * permit persons to whom the Software is furnished to do so, subject to | ||
| 12 | * the following conditions: | ||
| 13 | * | ||
| 14 | * The above copyright notice and this permission notice (including the | ||
| 15 | * next paragraph) shall be included in all copies or substantial portions | ||
| 16 | * of the Software. | ||
| 17 | * | ||
| 18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
| 21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
| 22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
| 23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
| 24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
| 25 | * | ||
| 26 | **************************************************************************/ | ||
| 27 | |||
| 28 | #include "drmP.h" | ||
| 29 | #include "vmwgfx_drv.h" | ||
| 30 | |||
| 31 | #define VMW_FENCE_WRAP (1 << 24) | ||
| 32 | |||
| 33 | irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS) | ||
| 34 | { | ||
| 35 | struct drm_device *dev = (struct drm_device *)arg; | ||
| 36 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
| 37 | uint32_t status; | ||
| 38 | |||
| 39 | spin_lock(&dev_priv->irq_lock); | ||
| 40 | status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); | ||
| 41 | spin_unlock(&dev_priv->irq_lock); | ||
| 42 | |||
| 43 | if (status & SVGA_IRQFLAG_ANY_FENCE) | ||
| 44 | wake_up_all(&dev_priv->fence_queue); | ||
| 45 | if (status & SVGA_IRQFLAG_FIFO_PROGRESS) | ||
| 46 | wake_up_all(&dev_priv->fifo_queue); | ||
| 47 | |||
| 48 | if (likely(status)) { | ||
| 49 | outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); | ||
| 50 | return IRQ_HANDLED; | ||
| 51 | } | ||
| 52 | |||
| 53 | return IRQ_NONE; | ||
| 54 | } | ||
| 55 | |||
| 56 | static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t sequence) | ||
| 57 | { | ||
| 58 | uint32_t busy; | ||
| 59 | |||
| 60 | mutex_lock(&dev_priv->hw_mutex); | ||
| 61 | busy = vmw_read(dev_priv, SVGA_REG_BUSY); | ||
| 62 | mutex_unlock(&dev_priv->hw_mutex); | ||
| 63 | |||
| 64 | return (busy == 0); | ||
| 65 | } | ||
| 66 | |||
| 67 | |||
| 68 | bool vmw_fence_signaled(struct vmw_private *dev_priv, | ||
| 69 | uint32_t sequence) | ||
| 70 | { | ||
| 71 | __le32 __iomem *fifo_mem = dev_priv->mmio_virt; | ||
| 72 | struct vmw_fifo_state *fifo_state; | ||
| 73 | bool ret; | ||
| 74 | |||
| 75 | if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP)) | ||
| 76 | return true; | ||
| 77 | |||
| 78 | dev_priv->last_read_sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE); | ||
| 79 | if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP)) | ||
| 80 | return true; | ||
| 81 | |||
| 82 | fifo_state = &dev_priv->fifo; | ||
| 83 | if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) && | ||
| 84 | vmw_fifo_idle(dev_priv, sequence)) | ||
| 85 | return true; | ||
| 86 | |||
| 87 | /** | ||
| 88 | * Below is to signal stale fences that have wrapped. | ||
| 89 | * First, block fence submission. | ||
| 90 | */ | ||
| 91 | |||
| 92 | down_read(&fifo_state->rwsem); | ||
| 93 | |||
| 94 | /** | ||
| 95 | * Then check if the sequence is higher than what we've actually | ||
| 96 | * emitted. Then the fence is stale and signaled. | ||
| 97 | */ | ||
| 98 | |||
| 99 | ret = ((dev_priv->fence_seq - sequence) > VMW_FENCE_WRAP); | ||
| 100 | up_read(&fifo_state->rwsem); | ||
| 101 | |||
| 102 | return ret; | ||
| 103 | } | ||
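Annotation: the signaled test relies on unsigned 32-bit subtraction, so it stays correct when the sequence counter wraps; the window is VMW_FENCE_WRAP (1 << 24), defined at the top of this file. A standalone model:

    #include <assert.h>
    #include <stdint.h>

    #define VMW_FENCE_WRAP (1 << 24)

    /* Standalone model of the signaled test: unsigned subtraction makes
     * the comparison robust across 32-bit sequence wrap-around. */
    static int seq_signaled(uint32_t last_read, uint32_t seq)
    {
            return (uint32_t)(last_read - seq) < VMW_FENCE_WRAP;
    }

    int main(void)
    {
            assert(seq_signaled(100, 90));         /* already passed  */
            assert(!seq_signaled(90, 100));        /* still pending   */
            assert(seq_signaled(5, 0xFFFFFFF0u));  /* passed, wrapped */
            return 0;
    }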
| 104 | |||
| 105 | int vmw_fallback_wait(struct vmw_private *dev_priv, | ||
| 106 | bool lazy, | ||
| 107 | bool fifo_idle, | ||
| 108 | uint32_t sequence, | ||
| 109 | bool interruptible, | ||
| 110 | unsigned long timeout) | ||
| 111 | { | ||
| 112 | struct vmw_fifo_state *fifo_state = &dev_priv->fifo; | ||
| 113 | |||
| 114 | uint32_t count = 0; | ||
| 115 | uint32_t signal_seq; | ||
| 116 | int ret; | ||
| 117 | unsigned long end_jiffies = jiffies + timeout; | ||
| 118 | bool (*wait_condition)(struct vmw_private *, uint32_t); | ||
| 119 | DEFINE_WAIT(__wait); | ||
| 120 | |||
| 121 | wait_condition = (fifo_idle) ? &vmw_fifo_idle : | ||
| 122 | &vmw_fence_signaled; | ||
| 123 | |||
| 124 | /** | ||
| 125 | * Block command submission while waiting for idle. | ||
| 126 | */ | ||
| 127 | |||
| 128 | if (fifo_idle) | ||
| 129 | down_read(&fifo_state->rwsem); | ||
| 130 | signal_seq = dev_priv->fence_seq; | ||
| 131 | ret = 0; | ||
| 132 | |||
| 133 | for (;;) { | ||
| 134 | prepare_to_wait(&dev_priv->fence_queue, &__wait, | ||
| 135 | (interruptible) ? | ||
| 136 | TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE); | ||
| 137 | if (wait_condition(dev_priv, sequence)) | ||
| 138 | break; | ||
| 139 | if (time_after_eq(jiffies, end_jiffies)) { | ||
| 140 | DRM_ERROR("SVGA device lockup.\n"); | ||
| 141 | break; | ||
| 142 | } | ||
| 143 | if (lazy) | ||
| 144 | schedule_timeout(1); | ||
| 145 | else if ((++count & 0x0F) == 0) { | ||
| 146 | /** | ||
| 147 | * FIXME: Use schedule_hrtimeout() here on | ||
| 148 | * newer kernels for lower CPU utilization. | ||
| 149 | */ | ||
| 150 | |||
| 151 | __set_current_state(TASK_RUNNING); | ||
| 152 | schedule(); | ||
| 153 | __set_current_state((interruptible) ? | ||
| 154 | TASK_INTERRUPTIBLE : | ||
| 155 | TASK_UNINTERRUPTIBLE); | ||
| 156 | } | ||
| 157 | if (interruptible && signal_pending(current)) { | ||
| 158 | ret = -ERESTART; | ||
| 159 | break; | ||
| 160 | } | ||
| 161 | } | ||
| 162 | finish_wait(&dev_priv->fence_queue, &__wait); | ||
| 163 | if (ret == 0 && fifo_idle) { | ||
| 164 | __le32 __iomem *fifo_mem = dev_priv->mmio_virt; | ||
| 165 | iowrite32(signal_seq, fifo_mem + SVGA_FIFO_FENCE); | ||
| 166 | } | ||
| 167 | wake_up_all(&dev_priv->fence_queue); | ||
| 168 | if (fifo_idle) | ||
| 169 | up_read(&fifo_state->rwsem); | ||
| 170 | |||
| 171 | return ret; | ||
| 172 | } | ||
| 173 | |||
| 174 | int vmw_wait_fence(struct vmw_private *dev_priv, | ||
| 175 | bool lazy, uint32_t sequence, | ||
| 176 | bool interruptible, unsigned long timeout) | ||
| 177 | { | ||
| 178 | long ret; | ||
| 179 | unsigned long irq_flags; | ||
| 180 | struct vmw_fifo_state *fifo = &dev_priv->fifo; | ||
| 181 | |||
| 182 | if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP)) | ||
| 183 | return 0; | ||
| 184 | |||
| 185 | if (likely(vmw_fence_signaled(dev_priv, sequence))) | ||
| 186 | return 0; | ||
| 187 | |||
| 188 | vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC); | ||
| 189 | |||
| 190 | if (!(fifo->capabilities & SVGA_FIFO_CAP_FENCE)) | ||
| 191 | return vmw_fallback_wait(dev_priv, lazy, true, sequence, | ||
| 192 | interruptible, timeout); | ||
| 193 | |||
| 194 | if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK)) | ||
| 195 | return vmw_fallback_wait(dev_priv, lazy, false, sequence, | ||
| 196 | interruptible, timeout); | ||
| 197 | |||
| 198 | mutex_lock(&dev_priv->hw_mutex); | ||
| 199 | if (atomic_add_return(1, &dev_priv->fence_queue_waiters) > 0) { | ||
| 200 | spin_lock_irqsave(&dev_priv->irq_lock, irq_flags); | ||
| 201 | outl(SVGA_IRQFLAG_ANY_FENCE, | ||
| 202 | dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); | ||
| 203 | vmw_write(dev_priv, SVGA_REG_IRQMASK, | ||
| 204 | vmw_read(dev_priv, SVGA_REG_IRQMASK) | | ||
| 205 | SVGA_IRQFLAG_ANY_FENCE); | ||
| 206 | spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); | ||
| 207 | } | ||
| 208 | mutex_unlock(&dev_priv->hw_mutex); | ||
| 209 | |||
| 210 | if (interruptible) | ||
| 211 | ret = wait_event_interruptible_timeout | ||
| 212 | (dev_priv->fence_queue, | ||
| 213 | vmw_fence_signaled(dev_priv, sequence), | ||
| 214 | timeout); | ||
| 215 | else | ||
| 216 | ret = wait_event_timeout | ||
| 217 | (dev_priv->fence_queue, | ||
| 218 | vmw_fence_signaled(dev_priv, sequence), | ||
| 219 | timeout); | ||
| 220 | |||
| 221 | if (unlikely(ret == -ERESTARTSYS)) | ||
| 222 | ret = -ERESTART; | ||
| 223 | else if (unlikely(ret == 0)) | ||
| 224 | ret = -EBUSY; | ||
| 225 | else if (likely(ret > 0)) | ||
| 226 | ret = 0; | ||
| 227 | |||
| 228 | mutex_lock(&dev_priv->hw_mutex); | ||
| 229 | if (atomic_dec_and_test(&dev_priv->fence_queue_waiters)) { | ||
| 230 | spin_lock_irqsave(&dev_priv->irq_lock, irq_flags); | ||
| 231 | vmw_write(dev_priv, SVGA_REG_IRQMASK, | ||
| 232 | vmw_read(dev_priv, SVGA_REG_IRQMASK) & | ||
| 233 | ~SVGA_IRQFLAG_ANY_FENCE); | ||
| 234 | spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); | ||
| 235 | } | ||
| 236 | mutex_unlock(&dev_priv->hw_mutex); | ||
| 237 | |||
| 238 | return ret; | ||
| 239 | } | ||
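Annotation: the atomic fence_queue_waiters counter implements waiter-refcounted interrupt masking; the same pattern guards SVGA_IRQFLAG_FIFO_PROGRESS in vmw_fifo_wait(). In its general form it looks like the sketch below (hypothetical names). Note the driver's `> 0` test makes the unmask run for every waiter rather than only the first; that is harmless because it executes under hw_mutex and is idempotent.

    /* Generic waiter-refcounted IRQ gating (sketch, names hypothetical). */
    if (atomic_add_return(1, &waiters) == 1)
            unmask_irq_source();       /* first waiter arms the IRQ */

    wait_event_timeout(queue, condition, timeout);

    if (atomic_dec_and_test(&waiters))
            mask_irq_source();         /* last waiter disarms it    */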
| 240 | |||
| 241 | void vmw_irq_preinstall(struct drm_device *dev) | ||
| 242 | { | ||
| 243 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
| 244 | uint32_t status; | ||
| 245 | |||
| 246 | if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK)) | ||
| 247 | return; | ||
| 248 | |||
| 249 | spin_lock_init(&dev_priv->irq_lock); | ||
| 250 | status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); | ||
| 251 | outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); | ||
| 252 | } | ||
| 253 | |||
| 254 | int vmw_irq_postinstall(struct drm_device *dev) | ||
| 255 | { | ||
| 256 | return 0; | ||
| 257 | } | ||
| 258 | |||
| 259 | void vmw_irq_uninstall(struct drm_device *dev) | ||
| 260 | { | ||
| 261 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
| 262 | uint32_t status; | ||
| 263 | |||
| 264 | if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK)) | ||
| 265 | return; | ||
| 266 | |||
| 267 | mutex_lock(&dev_priv->hw_mutex); | ||
| 268 | vmw_write(dev_priv, SVGA_REG_IRQMASK, 0); | ||
| 269 | mutex_unlock(&dev_priv->hw_mutex); | ||
| 270 | |||
| 271 | status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); | ||
| 272 | outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); | ||
| 273 | } | ||
| 274 | |||
| 275 | #define VMW_FENCE_WAIT_TIMEOUT (3 * HZ) | ||
| 276 | |||
| 277 | int vmw_fence_wait_ioctl(struct drm_device *dev, void *data, | ||
| 278 | struct drm_file *file_priv) | ||
| 279 | { | ||
| 280 | struct drm_vmw_fence_wait_arg *arg = | ||
| 281 | (struct drm_vmw_fence_wait_arg *)data; | ||
| 282 | unsigned long timeout; | ||
| 283 | |||
| 284 | if (!arg->cookie_valid) { | ||
| 285 | arg->cookie_valid = 1; | ||
| 286 | arg->kernel_cookie = jiffies + VMW_FENCE_WAIT_TIMEOUT; | ||
| 287 | } | ||
| 288 | |||
| 289 | timeout = jiffies; | ||
| 290 | if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) | ||
| 291 | return -EBUSY; | ||
| 292 | |||
| 293 | timeout = (unsigned long)arg->kernel_cookie - timeout; | ||
| 294 | return vmw_wait_fence(vmw_priv(dev), true, arg->sequence, true, timeout); | ||
| 295 | } | ||
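Annotation on the cookie handshake: on the first call the kernel stamps an absolute deadline into kernel_cookie, so a wait that is interrupted and reissued keeps the original three-second budget instead of restarting it. A user-space sketch; the ioctl request name is an assumption, while the struct fields follow the usage above.

    #include <errno.h>
    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>

    /* Sketch: restart-safe fence wait. The kernel fills kernel_cookie
     * on the first call; resubmitting the same arg keeps the deadline. */
    int wait_fence(int drm_fd, uint32_t seq)
    {
            struct drm_vmw_fence_wait_arg arg;
            int ret;

            memset(&arg, 0, sizeof(arg));
            arg.sequence = seq;
            arg.cookie_valid = 0;

            do {
                    ret = ioctl(drm_fd, DRM_IOCTL_VMW_FENCE_WAIT, &arg);
            } while (ret == -1 && (errno == ERESTART || errno == EINTR));

            return ret;
    }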
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c new file mode 100644 index 000000000000..e9403be446fe --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | |||
| @@ -0,0 +1,872 @@ | |||
| 1 | /************************************************************************** | ||
| 2 | * | ||
| 3 | * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA | ||
| 4 | * All Rights Reserved. | ||
| 5 | * | ||
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 7 | * copy of this software and associated documentation files (the | ||
| 8 | * "Software"), to deal in the Software without restriction, including | ||
| 9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
| 10 | * distribute, sub license, and/or sell copies of the Software, and to | ||
| 11 | * permit persons to whom the Software is furnished to do so, subject to | ||
| 12 | * the following conditions: | ||
| 13 | * | ||
| 14 | * The above copyright notice and this permission notice (including the | ||
| 15 | * next paragraph) shall be included in all copies or substantial portions | ||
| 16 | * of the Software. | ||
| 17 | * | ||
| 18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
| 21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
| 22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
| 23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
| 24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
| 25 | * | ||
| 26 | **************************************************************************/ | ||
| 27 | |||
| 28 | #include "vmwgfx_kms.h" | ||
| 29 | |||
| 30 | /* Might need an hrtimer here? */ | ||
| 31 | #define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1) | ||
| 32 | |||
| 33 | |||
| 34 | void vmw_display_unit_cleanup(struct vmw_display_unit *du) | ||
| 35 | { | ||
| 36 | if (du->cursor_surface) | ||
| 37 | vmw_surface_unreference(&du->cursor_surface); | ||
| 38 | if (du->cursor_dmabuf) | ||
| 39 | vmw_dmabuf_unreference(&du->cursor_dmabuf); | ||
| 40 | drm_crtc_cleanup(&du->crtc); | ||
| 41 | drm_encoder_cleanup(&du->encoder); | ||
| 42 | drm_connector_cleanup(&du->connector); | ||
| 43 | } | ||
| 44 | |||
| 45 | /* | ||
| 46 | * Display Unit Cursor functions | ||
| 47 | */ | ||
| 48 | |||
| 49 | int vmw_cursor_update_image(struct vmw_private *dev_priv, | ||
| 50 | u32 *image, u32 width, u32 height, | ||
| 51 | u32 hotspotX, u32 hotspotY) | ||
| 52 | { | ||
| 53 | struct { | ||
| 54 | u32 cmd; | ||
| 55 | SVGAFifoCmdDefineAlphaCursor cursor; | ||
| 56 | } *cmd; | ||
| 57 | u32 image_size = width * height * 4; | ||
| 58 | u32 cmd_size = sizeof(*cmd) + image_size; | ||
| 59 | |||
| 60 | if (!image) | ||
| 61 | return -EINVAL; | ||
| 62 | |||
| 63 | cmd = vmw_fifo_reserve(dev_priv, cmd_size); | ||
| 64 | if (unlikely(cmd == NULL)) { | ||
| 65 | DRM_ERROR("Fifo reserve failed.\n"); | ||
| 66 | return -ENOMEM; | ||
| 67 | } | ||
| 68 | |||
| 69 | memset(cmd, 0, sizeof(*cmd)); | ||
| 70 | |||
| 71 | memcpy(&cmd[1], image, image_size); | ||
| 72 | |||
| 73 | cmd->cmd = cpu_to_le32(SVGA_CMD_DEFINE_ALPHA_CURSOR); | ||
| 74 | cmd->cursor.id = cpu_to_le32(0); | ||
| 75 | cmd->cursor.width = cpu_to_le32(width); | ||
| 76 | cmd->cursor.height = cpu_to_le32(height); | ||
| 77 | cmd->cursor.hotspotX = cpu_to_le32(hotspotX); | ||
| 78 | cmd->cursor.hotspotY = cpu_to_le32(hotspotY); | ||
| 79 | |||
| 80 | vmw_fifo_commit(dev_priv, cmd_size); | ||
| 81 | |||
| 82 | return 0; | ||
| 83 | } | ||
| 84 | |||
| 85 | void vmw_cursor_update_position(struct vmw_private *dev_priv, | ||
| 86 | bool show, int x, int y) | ||
| 87 | { | ||
| 88 | __le32 __iomem *fifo_mem = dev_priv->mmio_virt; | ||
| 89 | uint32_t count; | ||
| 90 | |||
| 91 | iowrite32(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON); | ||
| 92 | iowrite32(x, fifo_mem + SVGA_FIFO_CURSOR_X); | ||
| 93 | iowrite32(y, fifo_mem + SVGA_FIFO_CURSOR_Y); | ||
| 94 | count = ioread32(fifo_mem + SVGA_FIFO_CURSOR_COUNT); | ||
| 95 | iowrite32(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT); | ||
| 96 | } | ||
| 97 | |||
| 98 | int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, | ||
| 99 | uint32_t handle, uint32_t width, uint32_t height) | ||
| 100 | { | ||
| 101 | struct vmw_private *dev_priv = vmw_priv(crtc->dev); | ||
| 102 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | ||
| 103 | struct vmw_display_unit *du = vmw_crtc_to_du(crtc); | ||
| 104 | struct vmw_surface *surface = NULL; | ||
| 105 | struct vmw_dma_buffer *dmabuf = NULL; | ||
| 106 | int ret; | ||
| 107 | |||
| 108 | if (handle) { | ||
| 109 | ret = vmw_user_surface_lookup(dev_priv, tfile, | ||
| 110 | handle, &surface); | ||
| 111 | if (!ret) { | ||
| 112 | if (!surface->snooper.image) { | ||
| 113 | DRM_ERROR("surface not suitable for cursor\n"); | ||
| 114 | return -EINVAL; | ||
| 115 | } | ||
| 116 | } else { | ||
| 117 | ret = vmw_user_dmabuf_lookup(tfile, | ||
| 118 | handle, &dmabuf); | ||
| 119 | if (ret) { | ||
| 120 | DRM_ERROR("failed to find surface or dmabuf: %i\n", ret); | ||
| 121 | return -EINVAL; | ||
| 122 | } | ||
| 123 | } | ||
| 124 | } | ||
| 125 | |||
| 126 | /* takedown old cursor */ | ||
| 127 | if (du->cursor_surface) { | ||
| 128 | du->cursor_surface->snooper.crtc = NULL; | ||
| 129 | vmw_surface_unreference(&du->cursor_surface); | ||
| 130 | } | ||
| 131 | if (du->cursor_dmabuf) | ||
| 132 | vmw_dmabuf_unreference(&du->cursor_dmabuf); | ||
| 133 | |||
| 134 | /* setup new image */ | ||
| 135 | if (surface) { | ||
| 136 | /* vmw_user_surface_lookup takes one reference */ | ||
| 137 | du->cursor_surface = surface; | ||
| 138 | |||
| 139 | du->cursor_surface->snooper.crtc = crtc; | ||
| 140 | du->cursor_age = du->cursor_surface->snooper.age; | ||
| 141 | vmw_cursor_update_image(dev_priv, surface->snooper.image, | ||
| 142 | 64, 64, du->hotspot_x, du->hotspot_y); | ||
| 143 | } else if (dmabuf) { | ||
| 144 | struct ttm_bo_kmap_obj map; | ||
| 145 | unsigned long kmap_offset; | ||
| 146 | unsigned long kmap_num; | ||
| 147 | void *virtual; | ||
| 148 | bool dummy; | ||
| 149 | |||
| 150 | /* vmw_user_dmabuf_lookup takes one reference */ | ||
| 151 | du->cursor_dmabuf = dmabuf; | ||
| 152 | |||
| 153 | kmap_offset = 0; | ||
| 154 | kmap_num = (64*64*4) >> PAGE_SHIFT; | ||
| 155 | |||
| 156 | ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0); | ||
| 157 | if (unlikely(ret != 0)) { | ||
| 158 | DRM_ERROR("reserve failed\n"); | ||
| 159 | return -EINVAL; | ||
| 160 | } | ||
| 161 | |||
| 162 | ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map); | ||
| 163 | if (unlikely(ret != 0)) | ||
| 164 | goto err_unreserve; | ||
| 165 | |||
| 166 | virtual = ttm_kmap_obj_virtual(&map, &dummy); | ||
| 167 | vmw_cursor_update_image(dev_priv, virtual, 64, 64, | ||
| 168 | du->hotspot_x, du->hotspot_y); | ||
| 169 | |||
| 170 | ttm_bo_kunmap(&map); | ||
| 171 | err_unreserve: | ||
| 172 | ttm_bo_unreserve(&dmabuf->base); | ||
| 173 | |||
| 174 | } else { | ||
| 175 | vmw_cursor_update_position(dev_priv, false, 0, 0); | ||
| 176 | return 0; | ||
| 177 | } | ||
| 178 | |||
| 179 | vmw_cursor_update_position(dev_priv, true, du->cursor_x, du->cursor_y); | ||
| 180 | |||
| 181 | return 0; | ||
| 182 | } | ||
| 183 | |||
| 184 | int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) | ||
| 185 | { | ||
| 186 | struct vmw_private *dev_priv = vmw_priv(crtc->dev); | ||
| 187 | struct vmw_display_unit *du = vmw_crtc_to_du(crtc); | ||
| 188 | bool shown = du->cursor_surface || du->cursor_dmabuf; | ||
| 189 | |||
| 190 | du->cursor_x = x + crtc->x; | ||
| 191 | du->cursor_y = y + crtc->y; | ||
| 192 | |||
| 193 | vmw_cursor_update_position(dev_priv, shown, | ||
| 194 | du->cursor_x, du->cursor_y); | ||
| 195 | |||
| 196 | return 0; | ||
| 197 | } | ||
| 198 | |||
| 199 | void vmw_kms_cursor_snoop(struct vmw_surface *srf, | ||
| 200 | struct ttm_object_file *tfile, | ||
| 201 | struct ttm_buffer_object *bo, | ||
| 202 | SVGA3dCmdHeader *header) | ||
| 203 | { | ||
| 204 | struct ttm_bo_kmap_obj map; | ||
| 205 | unsigned long kmap_offset; | ||
| 206 | unsigned long kmap_num; | ||
| 207 | SVGA3dCopyBox *box; | ||
| 208 | unsigned box_count; | ||
| 209 | void *virtual; | ||
| 210 | bool dummy; | ||
| 211 | struct vmw_dma_cmd { | ||
| 212 | SVGA3dCmdHeader header; | ||
| 213 | SVGA3dCmdSurfaceDMA dma; | ||
| 214 | } *cmd; | ||
| 215 | int ret; | ||
| 216 | |||
| 217 | cmd = container_of(header, struct vmw_dma_cmd, header); | ||
| 218 | |||
| 219 | /* No snooper installed */ | ||
| 220 | if (!srf->snooper.image) | ||
| 221 | return; | ||
| 222 | |||
| 223 | if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) { | ||
| 224 | DRM_ERROR("face and mipmap for cursors should never != 0\n"); | ||
| 225 | return; | ||
| 226 | } | ||
| 227 | |||
| 228 | if (cmd->header.size < 64) { | ||
| 229 | DRM_ERROR("at least one full copy box must be given\n"); | ||
| 230 | return; | ||
| 231 | } | ||
| 232 | |||
| 233 | box = (SVGA3dCopyBox *)&cmd[1]; | ||
| 234 | box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) / | ||
| 235 | sizeof(SVGA3dCopyBox); | ||
| 236 | |||
| 237 | if (cmd->dma.guest.pitch != (64 * 4) || | ||
| 238 | cmd->dma.guest.ptr.offset % PAGE_SIZE || | ||
| 239 | box->x != 0 || box->y != 0 || box->z != 0 || | ||
| 240 | box->srcx != 0 || box->srcy != 0 || box->srcz != 0 || | ||
| 241 | box->w != 64 || box->h != 64 || box->d != 1 || | ||
| 242 | box_count != 1) { | ||
| 243 | /* TODO handle non page aligned offsets */ | ||
| 244 | /* TODO handle partial uploads and pitch != 256 */ | ||
| 245 | /* TODO handle more than one copy (size != 64) */ | ||
| 246 | DRM_ERROR("Unsupported cursor DMA layout; not snooping.\n"); | ||
| 247 | return; | ||
| 248 | } | ||
| 249 | |||
| 250 | kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT; | ||
| 251 | kmap_num = (64*64*4) >> PAGE_SHIFT; | ||
| 252 | |||
| 253 | ret = ttm_bo_reserve(bo, true, false, false, 0); | ||
| 254 | if (unlikely(ret != 0)) { | ||
| 255 | DRM_ERROR("reserve failed\n"); | ||
| 256 | return; | ||
| 257 | } | ||
| 258 | |||
| 259 | ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map); | ||
| 260 | if (unlikely(ret != 0)) | ||
| 261 | goto err_unreserve; | ||
| 262 | |||
| 263 | virtual = ttm_kmap_obj_virtual(&map, &dummy); | ||
| 264 | |||
| 265 | memcpy(srf->snooper.image, virtual, 64*64*4); | ||
| 266 | srf->snooper.age++; | ||
| 267 | |||
| 268 | /* we can't update the cursor image from here, since execbuf has | ||
| 269 | * already reserved fifo space. | ||
| 270 | * | ||
| 271 | * if (srf->snooper.crtc) | ||
| 272 | * vmw_ldu_crtc_cursor_update_image(dev_priv, | ||
| 273 | * srf->snooper.image, 64, 64, | ||
| 274 | * du->hotspot_x, du->hotspot_y); | ||
| 275 | */ | ||
| 276 | |||
| 277 | ttm_bo_kunmap(&map); | ||
| 278 | err_unreserve: | ||
| 279 | ttm_bo_unreserve(bo); | ||
| 280 | } | ||
| 281 | |||
| 282 | void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv) | ||
| 283 | { | ||
| 284 | struct drm_device *dev = dev_priv->dev; | ||
| 285 | struct vmw_display_unit *du; | ||
| 286 | struct drm_crtc *crtc; | ||
| 287 | |||
| 288 | mutex_lock(&dev->mode_config.mutex); | ||
| 289 | |||
| 290 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
| 291 | du = vmw_crtc_to_du(crtc); | ||
| 292 | if (!du->cursor_surface || | ||
| 293 | du->cursor_age == du->cursor_surface->snooper.age) | ||
| 294 | continue; | ||
| 295 | |||
| 296 | du->cursor_age = du->cursor_surface->snooper.age; | ||
| 297 | vmw_cursor_update_image(dev_priv, | ||
| 298 | du->cursor_surface->snooper.image, | ||
| 299 | 64, 64, du->hotspot_x, du->hotspot_y); | ||
| 300 | } | ||
| 301 | |||
| 302 | mutex_unlock(&dev->mode_config.mutex); | ||
| 303 | } | ||
| 304 | |||
| 305 | /* | ||
| 306 | * Generic framebuffer code | ||
| 307 | */ | ||
| 308 | |||
| 309 | int vmw_framebuffer_create_handle(struct drm_framebuffer *fb, | ||
| 310 | struct drm_file *file_priv, | ||
| 311 | unsigned int *handle) | ||
| 312 | { | ||
| 313 | if (handle) | ||
| 314 | *handle = 0; | ||
| 315 | |||
| 316 | return 0; | ||
| 317 | } | ||
| 318 | |||
| 319 | /* | ||
| 320 | * Surface framebuffer code | ||
| 321 | */ | ||
| 322 | |||
| 323 | #define vmw_framebuffer_to_vfbs(x) \ | ||
| 324 | container_of(x, struct vmw_framebuffer_surface, base.base) | ||
| 325 | |||
| 326 | struct vmw_framebuffer_surface { | ||
| 327 | struct vmw_framebuffer base; | ||
| 328 | struct vmw_surface *surface; | ||
| 329 | struct delayed_work d_work; | ||
| 330 | struct mutex work_lock; | ||
| 331 | bool present_fs; | ||
| 332 | }; | ||
| 333 | |||
| 334 | void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer) | ||
| 335 | { | ||
| 336 | struct vmw_framebuffer_surface *vfb = | ||
| 337 | vmw_framebuffer_to_vfbs(framebuffer); | ||
| 338 | |||
| 339 | cancel_delayed_work_sync(&vfb->d_work); | ||
| 340 | drm_framebuffer_cleanup(framebuffer); | ||
| 341 | vmw_surface_unreference(&vfb->surface); | ||
| 342 | |||
| 343 | kfree(framebuffer); | ||
| 344 | } | ||
| 345 | |||
| 346 | static void vmw_framebuffer_present_fs_callback(struct work_struct *work) | ||
| 347 | { | ||
| 348 | struct delayed_work *d_work = | ||
| 349 | container_of(work, struct delayed_work, work); | ||
| 350 | struct vmw_framebuffer_surface *vfbs = | ||
| 351 | container_of(d_work, struct vmw_framebuffer_surface, d_work); | ||
| 352 | struct vmw_surface *surf = vfbs->surface; | ||
| 353 | struct drm_framebuffer *framebuffer = &vfbs->base.base; | ||
| 354 | struct vmw_private *dev_priv = vmw_priv(framebuffer->dev); | ||
| 355 | |||
| 356 | struct { | ||
| 357 | SVGA3dCmdHeader header; | ||
| 358 | SVGA3dCmdPresent body; | ||
| 359 | SVGA3dCopyRect cr; | ||
| 360 | } *cmd; | ||
| 361 | |||
| 362 | mutex_lock(&vfbs->work_lock); | ||
| 363 | if (!vfbs->present_fs) | ||
| 364 | goto out_unlock; | ||
| 365 | |||
| 366 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
| 367 | if (unlikely(cmd == NULL)) | ||
| 368 | goto out_resched; | ||
| 369 | |||
| 370 | cmd->header.id = cpu_to_le32(SVGA_3D_CMD_PRESENT); | ||
| 371 | cmd->header.size = cpu_to_le32(sizeof(cmd->body) + sizeof(cmd->cr)); | ||
| 372 | cmd->body.sid = cpu_to_le32(surf->res.id); | ||
| 373 | cmd->cr.x = cpu_to_le32(0); | ||
| 374 | cmd->cr.y = cpu_to_le32(0); | ||
| 375 | cmd->cr.srcx = cmd->cr.x; | ||
| 376 | cmd->cr.srcy = cmd->cr.y; | ||
| 377 | cmd->cr.w = cpu_to_le32(framebuffer->width); | ||
| 378 | cmd->cr.h = cpu_to_le32(framebuffer->height); | ||
| 379 | vfbs->present_fs = false; | ||
| 380 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
| 381 | out_resched: | ||
| 382 | /** | ||
| 383 | * Will not re-add if already pending. | ||
| 384 | */ | ||
| 385 | schedule_delayed_work(&vfbs->d_work, VMWGFX_PRESENT_RATE); | ||
| 386 | out_unlock: | ||
| 387 | mutex_unlock(&vfbs->work_lock); | ||
| 388 | } | ||
| 389 | |||
| 390 | |||
| 391 | int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer, | ||
| 392 | unsigned flags, unsigned color, | ||
| 393 | struct drm_clip_rect *clips, | ||
| 394 | unsigned num_clips) | ||
| 395 | { | ||
| 396 | struct vmw_private *dev_priv = vmw_priv(framebuffer->dev); | ||
| 397 | struct vmw_framebuffer_surface *vfbs = | ||
| 398 | vmw_framebuffer_to_vfbs(framebuffer); | ||
| 399 | struct vmw_surface *surf = vfbs->surface; | ||
| 400 | struct drm_clip_rect norect; | ||
| 401 | SVGA3dCopyRect *cr; | ||
| 402 | int i, inc = 1; | ||
| 403 | |||
| 404 | struct { | ||
| 405 | SVGA3dCmdHeader header; | ||
| 406 | SVGA3dCmdPresent body; | ||
| 407 | SVGA3dCopyRect cr; | ||
| 408 | } *cmd; | ||
| 409 | |||
| 410 | if (!num_clips || | ||
| 411 | !(dev_priv->fifo.capabilities & | ||
| 412 | SVGA_FIFO_CAP_SCREEN_OBJECT)) { | ||
| 413 | int ret; | ||
| 414 | |||
| 415 | mutex_lock(&vfbs->work_lock); | ||
| 416 | vfbs->present_fs = true; | ||
| 417 | ret = schedule_delayed_work(&vfbs->d_work, VMWGFX_PRESENT_RATE); | ||
| 418 | mutex_unlock(&vfbs->work_lock); | ||
| 419 | if (ret) { | ||
| 420 | /** | ||
| 421 | * No work was pending, so force an immediate present. | ||
| 422 | */ | ||
| 423 | vmw_framebuffer_present_fs_callback(&vfbs->d_work.work); | ||
| 424 | } | ||
| 425 | return 0; | ||
| 426 | } | ||
| 427 | |||
| 428 | if (!num_clips) { | ||
| 429 | num_clips = 1; | ||
| 430 | clips = &norect; | ||
| 431 | norect.x1 = norect.y1 = 0; | ||
| 432 | norect.x2 = framebuffer->width; | ||
| 433 | norect.y2 = framebuffer->height; | ||
| 434 | } else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) { | ||
| 435 | num_clips /= 2; | ||
| 436 | inc = 2; /* skip source rects */ | ||
| 437 | } | ||
| 438 | |||
| 439 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr)); | ||
| 440 | if (unlikely(cmd == NULL)) { | ||
| 441 | DRM_ERROR("Fifo reserve failed.\n"); | ||
| 442 | return -ENOMEM; | ||
| 443 | } | ||
| 444 | |||
| 445 | memset(cmd, 0, sizeof(*cmd)); | ||
| 446 | |||
| 447 | cmd->header.id = cpu_to_le32(SVGA_3D_CMD_PRESENT); | ||
| 448 | cmd->header.size = cpu_to_le32(sizeof(cmd->body) + num_clips * sizeof(cmd->cr)); | ||
| 449 | cmd->body.sid = cpu_to_le32(surf->res.id); | ||
| 450 | |||
| 451 | for (i = 0, cr = &cmd->cr; i < num_clips; i++, cr++, clips += inc) { | ||
| 452 | cr->x = cpu_to_le16(clips->x1); | ||
| 453 | cr->y = cpu_to_le16(clips->y1); | ||
| 454 | cr->srcx = cr->x; | ||
| 455 | cr->srcy = cr->y; | ||
| 456 | cr->w = cpu_to_le16(clips->x2 - clips->x1); | ||
| 457 | cr->h = cpu_to_le16(clips->y2 - clips->y1); | ||
| 458 | } | ||
| 459 | |||
| 460 | vmw_fifo_commit(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr)); | ||
| 461 | |||
| 462 | return 0; | ||
| 463 | } | ||
| 464 | |||
| 465 | static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = { | ||
| 466 | .destroy = vmw_framebuffer_surface_destroy, | ||
| 467 | .dirty = vmw_framebuffer_surface_dirty, | ||
| 468 | .create_handle = vmw_framebuffer_create_handle, | ||
| 469 | }; | ||
| 470 | |||
| 471 | int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv, | ||
| 472 | struct vmw_surface *surface, | ||
| 473 | struct vmw_framebuffer **out, | ||
| 474 | unsigned width, unsigned height) | ||
| 476 | { | ||
| 477 | struct drm_device *dev = dev_priv->dev; | ||
| 478 | struct vmw_framebuffer_surface *vfbs; | ||
| 479 | int ret; | ||
| 480 | |||
| 481 | vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL); | ||
| 482 | if (!vfbs) { | ||
| 483 | ret = -ENOMEM; | ||
| 484 | goto out_err1; | ||
| 485 | } | ||
| 486 | |||
| 487 | ret = drm_framebuffer_init(dev, &vfbs->base.base, | ||
| 488 | &vmw_framebuffer_surface_funcs); | ||
| 489 | if (ret) | ||
| 490 | goto out_err2; | ||
| 491 | |||
| 492 | if (!vmw_surface_reference(surface)) { | ||
| 493 | DRM_ERROR("failed to reference surface %p\n", surface); | ||
| | ret = -EINVAL; | ||
| 494 | goto out_err3; | ||
| 495 | } | ||
| 496 | |||
| 497 | /* XXX get the first 3 from the surface info */ | ||
| 498 | vfbs->base.base.bits_per_pixel = 32; | ||
| 499 | vfbs->base.base.pitch = width * 32 / 8; | ||
| 500 | vfbs->base.base.depth = 24; | ||
| 501 | vfbs->base.base.width = width; | ||
| 502 | vfbs->base.base.height = height; | ||
| 503 | vfbs->base.pin = NULL; | ||
| 504 | vfbs->base.unpin = NULL; | ||
| 505 | vfbs->surface = surface; | ||
| 506 | mutex_init(&vfbs->work_lock); | ||
| 507 | INIT_DELAYED_WORK(&vfbs->d_work, &vmw_framebuffer_present_fs_callback); | ||
| 508 | *out = &vfbs->base; | ||
| 509 | |||
| 510 | return 0; | ||
| 511 | |||
| 512 | out_err3: | ||
| 513 | drm_framebuffer_cleanup(&vfbs->base.base); | ||
| 514 | out_err2: | ||
| 515 | kfree(vfbs); | ||
| 516 | out_err1: | ||
| 517 | return ret; | ||
| 518 | } | ||
| 519 | |||
| 520 | /* | ||
| 521 | * Dmabuf framebuffer code | ||
| 522 | */ | ||
| 523 | |||
| 524 | #define vmw_framebuffer_to_vfbd(x) \ | ||
| 525 | container_of(x, struct vmw_framebuffer_dmabuf, base.base) | ||
| 526 | |||
| 527 | struct vmw_framebuffer_dmabuf { | ||
| 528 | struct vmw_framebuffer base; | ||
| 529 | struct vmw_dma_buffer *buffer; | ||
| 530 | }; | ||
| 531 | |||
| 532 | void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer) | ||
| 533 | { | ||
| 534 | struct vmw_framebuffer_dmabuf *vfbd = | ||
| 535 | vmw_framebuffer_to_vfbd(framebuffer); | ||
| 536 | |||
| 537 | drm_framebuffer_cleanup(framebuffer); | ||
| 538 | vmw_dmabuf_unreference(&vfbd->buffer); | ||
| 539 | |||
| 540 | kfree(vfbd); | ||
| 541 | } | ||
| 542 | |||
| 543 | int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer, | ||
| 544 | unsigned flags, unsigned color, | ||
| 545 | struct drm_clip_rect *clips, | ||
| 546 | unsigned num_clips) | ||
| 547 | { | ||
| 548 | struct vmw_private *dev_priv = vmw_priv(framebuffer->dev); | ||
| 549 | struct drm_clip_rect norect; | ||
| 550 | struct { | ||
| 551 | uint32_t header; | ||
| 552 | SVGAFifoCmdUpdate body; | ||
| 553 | } *cmd; | ||
| 554 | int i, increment = 1; | ||
| 555 | |||
| 556 | if (!num_clips || | ||
| 557 | !(dev_priv->fifo.capabilities & | ||
| 558 | SVGA_FIFO_CAP_SCREEN_OBJECT)) { | ||
| 559 | num_clips = 1; | ||
| 560 | clips = &norect; | ||
| 561 | norect.x1 = norect.y1 = 0; | ||
| 562 | norect.x2 = framebuffer->width; | ||
| 563 | norect.y2 = framebuffer->height; | ||
| 564 | } else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) { | ||
| 565 | num_clips /= 2; | ||
| 566 | increment = 2; | ||
| 567 | } | ||
| 568 | |||
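| | /* Each clip rect becomes one complete SVGA_CMD_UPDATE command. */ | ||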
| 569 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips); | ||
| 570 | if (unlikely(cmd == NULL)) { | ||
| 571 | DRM_ERROR("Fifo reserve failed.\n"); | ||
| 572 | return -ENOMEM; | ||
| 573 | } | ||
| 574 | |||
| 575 | for (i = 0; i < num_clips; i++, clips += increment) { | ||
| 576 | cmd[i].header = cpu_to_le32(SVGA_CMD_UPDATE); | ||
| 577 | cmd[i].body.x = cpu_to_le32(clips->x1); | ||
| 578 | cmd[i].body.y = cpu_to_le32(clips->y1); | ||
| 579 | cmd[i].body.width = cpu_to_le32(clips->x2 - clips->x1); | ||
| 580 | cmd[i].body.height = cpu_to_le32(clips->y2 - clips->y1); | ||
| 581 | } | ||
| 582 | |||
| 583 | vmw_fifo_commit(dev_priv, sizeof(*cmd) * num_clips); | ||
| 584 | |||
| 585 | return 0; | ||
| 586 | } | ||
| 587 | |||
| 588 | static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = { | ||
| 589 | .destroy = vmw_framebuffer_dmabuf_destroy, | ||
| 590 | .dirty = vmw_framebuffer_dmabuf_dirty, | ||
| 591 | .create_handle = vmw_framebuffer_create_handle, | ||
| 592 | }; | ||
| 593 | |||
| 594 | static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb) | ||
| 595 | { | ||
| 596 | struct vmw_private *dev_priv = vmw_priv(vfb->base.dev); | ||
| 597 | struct vmw_framebuffer_dmabuf *vfbd = | ||
| 598 | vmw_framebuffer_to_vfbd(&vfb->base); | ||
| 599 | int ret; | ||
| 600 | |||
| 601 | vmw_overlay_pause_all(dev_priv); | ||
| 602 | |||
| 603 | ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer); | ||
| 604 | |||
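| | /* Set up a single 0x0 guest display, then program the new mode through the legacy registers. */ | ||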
| 605 | if (dev_priv->capabilities & SVGA_CAP_MULTIMON) { | ||
| 606 | vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1); | ||
| 607 | vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, 0); | ||
| 608 | vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true); | ||
| 609 | vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, 0); | ||
| 610 | vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, 0); | ||
| 611 | vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, 0); | ||
| 612 | vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, 0); | ||
| 613 | vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); | ||
| 614 | |||
| 615 | vmw_write(dev_priv, SVGA_REG_ENABLE, 1); | ||
| 616 | vmw_write(dev_priv, SVGA_REG_WIDTH, vfb->base.width); | ||
| 617 | vmw_write(dev_priv, SVGA_REG_HEIGHT, vfb->base.height); | ||
| 618 | vmw_write(dev_priv, SVGA_REG_BITS_PER_PIXEL, vfb->base.bits_per_pixel); | ||
| 619 | vmw_write(dev_priv, SVGA_REG_DEPTH, vfb->base.depth); | ||
| 620 | vmw_write(dev_priv, SVGA_REG_RED_MASK, 0x00ff0000); | ||
| 621 | vmw_write(dev_priv, SVGA_REG_GREEN_MASK, 0x0000ff00); | ||
| 622 | vmw_write(dev_priv, SVGA_REG_BLUE_MASK, 0x000000ff); | ||
| 623 | } else | ||
| 624 | WARN_ON(true); | ||
| 625 | |||
| 626 | vmw_overlay_resume_all(dev_priv); | ||
| 627 | |||
| 628 | return ret; | ||
| 629 | } | ||
| 630 | |||
| 631 | static int vmw_framebuffer_dmabuf_unpin(struct vmw_framebuffer *vfb) | ||
| 632 | { | ||
| 633 | struct vmw_private *dev_priv = vmw_priv(vfb->base.dev); | ||
| 634 | struct vmw_framebuffer_dmabuf *vfbd = | ||
| 635 | vmw_framebuffer_to_vfbd(&vfb->base); | ||
| 636 | |||
| 637 | if (WARN_ON(!vfbd->buffer)) | ||
| 638 | return 0; | ||
| 641 | |||
| 642 | return vmw_dmabuf_from_vram(dev_priv, vfbd->buffer); | ||
| 643 | } | ||
| 644 | |||
| 645 | int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv, | ||
| 646 | struct vmw_dma_buffer *dmabuf, | ||
| 647 | struct vmw_framebuffer **out, | ||
| 648 | unsigned width, unsigned height) | ||
| 650 | { | ||
| 651 | struct drm_device *dev = dev_priv->dev; | ||
| 652 | struct vmw_framebuffer_dmabuf *vfbd; | ||
| 653 | int ret; | ||
| 654 | |||
| 655 | vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL); | ||
| 656 | if (!vfbd) { | ||
| 657 | ret = -ENOMEM; | ||
| 658 | goto out_err1; | ||
| 659 | } | ||
| 660 | |||
| 661 | ret = drm_framebuffer_init(dev, &vfbd->base.base, | ||
| 662 | &vmw_framebuffer_dmabuf_funcs); | ||
| 663 | if (ret) | ||
| 664 | goto out_err2; | ||
| 665 | |||
| 666 | if (!vmw_dmabuf_reference(dmabuf)) { | ||
| 667 | DRM_ERROR("failed to reference dmabuf %p\n", dmabuf); | ||
| | ret = -EINVAL; | ||
| 668 | goto out_err3; | ||
| 669 | } | ||
| 670 | |||
| 671 | /* XXX get the first 3 from the buffer info */ | ||
| 672 | vfbd->base.base.bits_per_pixel = 32; | ||
| 673 | vfbd->base.base.pitch = width * 32 / 8; | ||
| 674 | vfbd->base.base.depth = 24; | ||
| 675 | vfbd->base.base.width = width; | ||
| 676 | vfbd->base.base.height = height; | ||
| 677 | vfbd->base.pin = vmw_framebuffer_dmabuf_pin; | ||
| 678 | vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin; | ||
| 679 | vfbd->buffer = dmabuf; | ||
| 680 | *out = &vfbd->base; | ||
| 681 | |||
| 682 | return 0; | ||
| 683 | |||
| 684 | out_err3: | ||
| 685 | drm_framebuffer_cleanup(&vfbd->base.base); | ||
| 686 | out_err2: | ||
| 687 | kfree(vfbd); | ||
| 688 | out_err1: | ||
| 689 | return ret; | ||
| 690 | } | ||
| 691 | |||
| 692 | /* | ||
| 693 | * Generic Kernel modesetting functions | ||
| 694 | */ | ||
| 695 | |||
| 696 | static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, | ||
| 697 | struct drm_file *file_priv, | ||
| 698 | struct drm_mode_fb_cmd *mode_cmd) | ||
| 699 | { | ||
| 700 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
| 701 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | ||
| 702 | struct vmw_framebuffer *vfb = NULL; | ||
| 703 | struct vmw_surface *surface = NULL; | ||
| 704 | struct vmw_dma_buffer *bo = NULL; | ||
| 705 | int ret; | ||
| 706 | |||
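| | /* The handle may name either a surface or a dma buffer; try the surface first and fall back to the buffer. */ | ||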
| 707 | ret = vmw_user_surface_lookup(dev_priv, tfile, | ||
| 708 | mode_cmd->handle, &surface); | ||
| 709 | if (ret) | ||
| 710 | goto try_dmabuf; | ||
| 711 | |||
| 712 | ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb, | ||
| 713 | mode_cmd->width, mode_cmd->height); | ||
| 714 | |||
| 715 | /* vmw_user_surface_lookup takes one ref, and so does new_fb */ | ||
| 716 | vmw_surface_unreference(&surface); | ||
| 717 | |||
| 718 | if (ret) { | ||
| 719 | DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret); | ||
| 720 | return NULL; | ||
| 721 | } | ||
| 722 | return &vfb->base; | ||
| 723 | |||
| 724 | try_dmabuf: | ||
| 725 | DRM_INFO("%s: trying buffer\n", __func__); | ||
| 726 | |||
| 727 | ret = vmw_user_dmabuf_lookup(tfile, mode_cmd->handle, &bo); | ||
| 728 | if (ret) { | ||
| 729 | DRM_ERROR("failed to find buffer: %i\n", ret); | ||
| 730 | return NULL; | ||
| 731 | } | ||
| 732 | |||
| 733 | ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb, | ||
| 734 | mode_cmd->width, mode_cmd->height); | ||
| 735 | |||
| 736 | /* vmw_user_dmabuf_lookup takes one ref, and so does new_fb */ | ||
| 737 | vmw_dmabuf_unreference(&bo); | ||
| 738 | |||
| 739 | if (ret) { | ||
| 740 | DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret); | ||
| 741 | return NULL; | ||
| 742 | } | ||
| 743 | |||
| 744 | return &vfb->base; | ||
| 745 | } | ||
| 746 | |||
| 747 | static int vmw_kms_fb_changed(struct drm_device *dev) | ||
| 748 | { | ||
| 749 | return 0; | ||
| 750 | } | ||
| 751 | |||
| 752 | static struct drm_mode_config_funcs vmw_kms_funcs = { | ||
| 753 | .fb_create = vmw_kms_fb_create, | ||
| 754 | .fb_changed = vmw_kms_fb_changed, | ||
| 755 | }; | ||
| 756 | |||
| 757 | int vmw_kms_init(struct vmw_private *dev_priv) | ||
| 758 | { | ||
| 759 | struct drm_device *dev = dev_priv->dev; | ||
| 760 | int ret; | ||
| 761 | |||
| 762 | drm_mode_config_init(dev); | ||
| 763 | dev->mode_config.funcs = &vmw_kms_funcs; | ||
| 764 | dev->mode_config.min_width = 640; | ||
| 765 | dev->mode_config.min_height = 480; | ||
| 766 | dev->mode_config.max_width = 2048; | ||
| 767 | dev->mode_config.max_height = 2048; | ||
| 768 | |||
| 769 | ret = vmw_kms_init_legacy_display_system(dev_priv); | ||
| 770 | |||
| 771 | return ret; | ||
| 772 | } | ||
| 773 | |||
| 774 | int vmw_kms_close(struct vmw_private *dev_priv) | ||
| 775 | { | ||
| 776 | /* | ||
| 777 | * The docs say we should take the lock before calling this function, | ||
| 778 | * but since it destroys encoders, and our destructor calls | ||
| 779 | * drm_encoder_cleanup, which takes the lock, we would deadlock. | ||
| 780 | */ | ||
| 781 | drm_mode_config_cleanup(dev_priv->dev); | ||
| 782 | vmw_kms_close_legacy_display_system(dev_priv); | ||
| 783 | return 0; | ||
| 784 | } | ||
| 785 | |||
| 786 | int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data, | ||
| 787 | struct drm_file *file_priv) | ||
| 788 | { | ||
| 789 | struct drm_vmw_cursor_bypass_arg *arg = data; | ||
| 790 | struct vmw_display_unit *du; | ||
| 791 | struct drm_mode_object *obj; | ||
| 792 | struct drm_crtc *crtc; | ||
| 793 | int ret = 0; | ||
| 794 | |||
| 796 | mutex_lock(&dev->mode_config.mutex); | ||
| 797 | if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) { | ||
| 798 | |||
| 799 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
| 800 | du = vmw_crtc_to_du(crtc); | ||
| 801 | du->hotspot_x = arg->xhot; | ||
| 802 | du->hotspot_y = arg->yhot; | ||
| 803 | } | ||
| 804 | |||
| 805 | mutex_unlock(&dev->mode_config.mutex); | ||
| 806 | return 0; | ||
| 807 | } | ||
| 808 | |||
| 809 | obj = drm_mode_object_find(dev, arg->crtc_id, DRM_MODE_OBJECT_CRTC); | ||
| 810 | if (!obj) { | ||
| 811 | ret = -EINVAL; | ||
| 812 | goto out; | ||
| 813 | } | ||
| 814 | |||
| 815 | crtc = obj_to_crtc(obj); | ||
| 816 | du = vmw_crtc_to_du(crtc); | ||
| 817 | |||
| 818 | du->hotspot_x = arg->xhot; | ||
| 819 | du->hotspot_y = arg->yhot; | ||
| 820 | |||
| 821 | out: | ||
| 822 | mutex_unlock(&dev->mode_config.mutex); | ||
| 823 | |||
| 824 | return ret; | ||
| 825 | } | ||
| 826 | |||
| 827 | int vmw_kms_save_vga(struct vmw_private *vmw_priv) | ||
| 828 | { | ||
| 829 | /* | ||
| 830 | * Set up a single multimon monitor with a size of 0x0; | ||
| 831 | * this stops the UI from resizing when we | ||
| 832 | * change the framebuffer size. | ||
| 833 | */ | ||
| 834 | if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) { | ||
| 835 | vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1); | ||
| 836 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0); | ||
| 837 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true); | ||
| 838 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, 0); | ||
| 839 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, 0); | ||
| 840 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, 0); | ||
| 841 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, 0); | ||
| 842 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); | ||
| 843 | } | ||
| 844 | |||
| 845 | vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH); | ||
| 846 | vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT); | ||
| 847 | vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL); | ||
| 848 | vmw_priv->vga_depth = vmw_read(vmw_priv, SVGA_REG_DEPTH); | ||
| 849 | vmw_priv->vga_pseudo = vmw_read(vmw_priv, SVGA_REG_PSEUDOCOLOR); | ||
| 850 | vmw_priv->vga_red_mask = vmw_read(vmw_priv, SVGA_REG_RED_MASK); | ||
| 851 | vmw_priv->vga_green_mask = vmw_read(vmw_priv, SVGA_REG_GREEN_MASK); | ||
| 852 | vmw_priv->vga_blue_mask = vmw_read(vmw_priv, SVGA_REG_BLUE_MASK); | ||
| 853 | |||
| 854 | return 0; | ||
| 855 | } | ||
| 856 | |||
| 857 | int vmw_kms_restore_vga(struct vmw_private *vmw_priv) | ||
| 858 | { | ||
| 859 | vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width); | ||
| 860 | vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height); | ||
| 861 | vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp); | ||
| 862 | vmw_write(vmw_priv, SVGA_REG_DEPTH, vmw_priv->vga_depth); | ||
| 863 | vmw_write(vmw_priv, SVGA_REG_PSEUDOCOLOR, vmw_priv->vga_pseudo); | ||
| 864 | vmw_write(vmw_priv, SVGA_REG_RED_MASK, vmw_priv->vga_red_mask); | ||
| 865 | vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, vmw_priv->vga_green_mask); | ||
| 866 | vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, vmw_priv->vga_blue_mask); | ||
| 867 | |||
| 868 | /* TODO check for multimon */ | ||
| 869 | vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 0); | ||
| 870 | |||
| 871 | return 0; | ||
| 872 | } | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h new file mode 100644 index 000000000000..8b95249f0531 --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | |||
| @@ -0,0 +1,102 @@ | |||
| 1 | /************************************************************************** | ||
| 2 | * | ||
| 3 | * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA | ||
| 4 | * All Rights Reserved. | ||
| 5 | * | ||
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 7 | * copy of this software and associated documentation files (the | ||
| 8 | * "Software"), to deal in the Software without restriction, including | ||
| 9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
| 10 | * distribute, sub license, and/or sell copies of the Software, and to | ||
| 11 | * permit persons to whom the Software is furnished to do so, subject to | ||
| 12 | * the following conditions: | ||
| 13 | * | ||
| 14 | * The above copyright notice and this permission notice (including the | ||
| 15 | * next paragraph) shall be included in all copies or substantial portions | ||
| 16 | * of the Software. | ||
| 17 | * | ||
| 18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
| 21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
| 22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
| 23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
| 24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
| 25 | * | ||
| 26 | **************************************************************************/ | ||
| 27 | |||
| 28 | #ifndef VMWGFX_KMS_H_ | ||
| 29 | #define VMWGFX_KMS_H_ | ||
| 30 | |||
| 31 | #include "drmP.h" | ||
| 32 | #include "vmwgfx_drv.h" | ||
| 33 | |||
| 34 | |||
| 35 | #define vmw_framebuffer_to_vfb(x) \ | ||
| 36 | container_of(x, struct vmw_framebuffer, base) | ||
| 37 | |||
| 38 | /** | ||
| 39 | * Base class for framebuffers | ||
| 40 | * | ||
| 41 | * @pin is called whenever a crtc uses this framebuffer | ||
| 42 | * @unpin is called when the framebuffer is no longer used by any crtc | ||
| 43 | */ | ||
| 44 | struct vmw_framebuffer { | ||
| 45 | struct drm_framebuffer base; | ||
| 46 | int (*pin)(struct vmw_framebuffer *fb); | ||
| 47 | int (*unpin)(struct vmw_framebuffer *fb); | ||
| 48 | }; | ||
| 49 | |||
| 50 | |||
| 51 | #define vmw_crtc_to_du(x) \ | ||
| 52 | container_of(x, struct vmw_display_unit, crtc) | ||
| 53 | |||
| 54 | /* | ||
| 55 | * Basic cursor manipulation | ||
| 56 | */ | ||
| 57 | int vmw_cursor_update_image(struct vmw_private *dev_priv, | ||
| 58 | u32 *image, u32 width, u32 height, | ||
| 59 | u32 hotspotX, u32 hotspotY); | ||
| 60 | void vmw_cursor_update_position(struct vmw_private *dev_priv, | ||
| 61 | bool show, int x, int y); | ||
| 62 | |||
| 63 | /** | ||
| 64 | * Base class display unit. | ||
| 65 | * | ||
| 66 | * Since the SVGA hw doesn't have a concept of a crtc, encoder or | ||
| 67 | * connector, the display unit is all of them at the same time. This is | ||
| 68 | * true for both legacy multimon and screen objects. | ||
| 69 | */ | ||
| 70 | struct vmw_display_unit { | ||
| 71 | struct drm_crtc crtc; | ||
| 72 | struct drm_encoder encoder; | ||
| 73 | struct drm_connector connector; | ||
| 74 | |||
| 75 | struct vmw_surface *cursor_surface; | ||
| 76 | struct vmw_dma_buffer *cursor_dmabuf; | ||
| 77 | size_t cursor_age; | ||
| 78 | |||
| 79 | int cursor_x; | ||
| 80 | int cursor_y; | ||
| 81 | |||
| 82 | int hotspot_x; | ||
| 83 | int hotspot_y; | ||
| 84 | |||
| 85 | unsigned unit; | ||
| 86 | }; | ||
| 87 | |||
| 88 | /* | ||
| 89 | * Shared display unit functions - vmwgfx_kms.c | ||
| 90 | */ | ||
| 91 | void vmw_display_unit_cleanup(struct vmw_display_unit *du); | ||
| 92 | int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, | ||
| 93 | uint32_t handle, uint32_t width, uint32_t height); | ||
| 94 | int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y); | ||
| 95 | |||
| 96 | /* | ||
| 97 | * Legacy display unit functions - vmwgfx_ldu.c | ||
| 98 | */ | ||
| 99 | int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv); | ||
| 100 | int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv); | ||
| 101 | |||
| 102 | #endif | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c new file mode 100644 index 000000000000..90891593bf6c --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | |||
| @@ -0,0 +1,516 @@ | |||
| 1 | /************************************************************************** | ||
| 2 | * | ||
| 3 | * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA | ||
| 4 | * All Rights Reserved. | ||
| 5 | * | ||
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 7 | * copy of this software and associated documentation files (the | ||
| 8 | * "Software"), to deal in the Software without restriction, including | ||
| 9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
| 10 | * distribute, sub license, and/or sell copies of the Software, and to | ||
| 11 | * permit persons to whom the Software is furnished to do so, subject to | ||
| 12 | * the following conditions: | ||
| 13 | * | ||
| 14 | * The above copyright notice and this permission notice (including the | ||
| 15 | * next paragraph) shall be included in all copies or substantial portions | ||
| 16 | * of the Software. | ||
| 17 | * | ||
| 18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
| 21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
| 22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
| 23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
| 24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
| 25 | * | ||
| 26 | **************************************************************************/ | ||
| 27 | |||
| 28 | #include "vmwgfx_kms.h" | ||
| 29 | |||
| 30 | #define vmw_crtc_to_ldu(x) \ | ||
| 31 | container_of(x, struct vmw_legacy_display_unit, base.crtc) | ||
| 32 | #define vmw_encoder_to_ldu(x) \ | ||
| 33 | container_of(x, struct vmw_legacy_display_unit, base.encoder) | ||
| 34 | #define vmw_connector_to_ldu(x) \ | ||
| 35 | container_of(x, struct vmw_legacy_display_unit, base.connector) | ||
| 36 | |||
| 37 | struct vmw_legacy_display { | ||
| 38 | struct list_head active; | ||
| 39 | |||
| 40 | unsigned num_active; | ||
| 41 | |||
| 42 | struct vmw_framebuffer *fb; | ||
| 43 | }; | ||
| 44 | |||
| 45 | /** | ||
| 46 | * Display unit using the legacy register interface. | ||
| 47 | */ | ||
| 48 | struct vmw_legacy_display_unit { | ||
| 49 | struct vmw_display_unit base; | ||
| 50 | |||
| 51 | struct list_head active; | ||
| 52 | |||
| 53 | unsigned unit; | ||
| 54 | }; | ||
| 55 | |||
| 56 | static void vmw_ldu_destroy(struct vmw_legacy_display_unit *ldu) | ||
| 57 | { | ||
| 58 | list_del_init(&ldu->active); | ||
| 59 | vmw_display_unit_cleanup(&ldu->base); | ||
| 60 | kfree(ldu); | ||
| 61 | } | ||
| 62 | |||
| 63 | |||
| 64 | /* | ||
| 65 | * Legacy Display Unit CRTC functions | ||
| 66 | */ | ||
| 67 | |||
| 68 | static void vmw_ldu_crtc_save(struct drm_crtc *crtc) | ||
| 69 | { | ||
| 70 | } | ||
| 71 | |||
| 72 | static void vmw_ldu_crtc_restore(struct drm_crtc *crtc) | ||
| 73 | { | ||
| 74 | } | ||
| 75 | |||
| 76 | static void vmw_ldu_crtc_gamma_set(struct drm_crtc *crtc, | ||
| 77 | u16 *r, u16 *g, u16 *b, | ||
| 78 | uint32_t size) | ||
| 79 | { | ||
| 80 | } | ||
| 81 | |||
| 82 | static void vmw_ldu_crtc_destroy(struct drm_crtc *crtc) | ||
| 83 | { | ||
| 84 | vmw_ldu_destroy(vmw_crtc_to_ldu(crtc)); | ||
| 85 | } | ||
| 86 | |||
| 87 | static int vmw_ldu_commit_list(struct vmw_private *dev_priv) | ||
| 88 | { | ||
| 89 | struct vmw_legacy_display *lds = dev_priv->ldu_priv; | ||
| 90 | struct vmw_legacy_display_unit *entry; | ||
| 91 | struct drm_crtc *crtc; | ||
| 92 | int i = 0; | ||
| 93 | |||
| 94 | /* to stop the screen from changing size on resize */ | ||
| 95 | vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 0); | ||
| 96 | for (i = 0; i < lds->num_active; i++) { | ||
| 97 | vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, i); | ||
| 98 | vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, !i); | ||
| 99 | vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, 0); | ||
| 100 | vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, 0); | ||
| 101 | vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, 0); | ||
| 102 | vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, 0); | ||
| 103 | vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); | ||
| 104 | } | ||
| 105 | |||
| 106 | /* Now set the mode */ | ||
| 107 | vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, lds->num_active); | ||
| 108 | i = 0; | ||
| 109 | list_for_each_entry(entry, &lds->active, active) { | ||
| 110 | crtc = &entry->base.crtc; | ||
| 111 | |||
| 112 | vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, i); | ||
| 113 | vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, !i); | ||
| 114 | vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, crtc->x); | ||
| 115 | vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, crtc->y); | ||
| 116 | vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, crtc->mode.hdisplay); | ||
| 117 | vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, crtc->mode.vdisplay); | ||
| 118 | vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); | ||
| 119 | |||
| 120 | i++; | ||
| 121 | } | ||
| 122 | |||
| 123 | return 0; | ||
| 124 | } | ||
| 125 | |||
| 126 | static int vmw_ldu_del_active(struct vmw_private *vmw_priv, | ||
| 127 | struct vmw_legacy_display_unit *ldu) | ||
| 128 | { | ||
| 129 | struct vmw_legacy_display *ld = vmw_priv->ldu_priv; | ||
| 130 | if (list_empty(&ldu->active)) | ||
| 131 | return 0; | ||
| 132 | |||
| 133 | list_del_init(&ldu->active); | ||
| 134 | if (--(ld->num_active) == 0) { | ||
| 135 | BUG_ON(!ld->fb); | ||
| 136 | if (ld->fb->unpin) | ||
| 137 | ld->fb->unpin(ld->fb); | ||
| 138 | ld->fb = NULL; | ||
| 139 | } | ||
| 140 | |||
| 141 | return 0; | ||
| 142 | } | ||
| 143 | |||
| 144 | static int vmw_ldu_add_active(struct vmw_private *vmw_priv, | ||
| 145 | struct vmw_legacy_display_unit *ldu, | ||
| 146 | struct vmw_framebuffer *vfb) | ||
| 147 | { | ||
| 148 | struct vmw_legacy_display *ld = vmw_priv->ldu_priv; | ||
| 149 | struct vmw_legacy_display_unit *entry; | ||
| 150 | struct list_head *at; | ||
| 151 | |||
| 152 | if (!list_empty(&ldu->active)) | ||
| 153 | return 0; | ||
| 154 | |||
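| | /* Keep the active list sorted by unit number. */ | ||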
| 155 | at = &ld->active; | ||
| 156 | list_for_each_entry(entry, &ld->active, active) { | ||
| 157 | if (entry->unit > ldu->unit) | ||
| 158 | break; | ||
| 159 | |||
| 160 | at = &entry->active; | ||
| 161 | } | ||
| 162 | |||
| 163 | list_add(&ldu->active, at); | ||
| 164 | if (ld->num_active++ == 0) { | ||
| 165 | BUG_ON(ld->fb); | ||
| 166 | if (vfb->pin) | ||
| 167 | vfb->pin(vfb); | ||
| 168 | ld->fb = vfb; | ||
| 169 | } | ||
| 170 | |||
| 171 | return 0; | ||
| 172 | } | ||
| 173 | |||
| 174 | static int vmw_ldu_crtc_set_config(struct drm_mode_set *set) | ||
| 175 | { | ||
| 176 | struct vmw_private *dev_priv; | ||
| 177 | struct vmw_legacy_display_unit *ldu; | ||
| 178 | struct drm_connector *connector; | ||
| 179 | struct drm_display_mode *mode; | ||
| 180 | struct drm_encoder *encoder; | ||
| 181 | struct vmw_framebuffer *vfb; | ||
| 182 | struct drm_framebuffer *fb; | ||
| 183 | struct drm_crtc *crtc; | ||
| 184 | |||
| 185 | if (!set) | ||
| 186 | return -EINVAL; | ||
| 187 | |||
| 188 | if (!set->crtc) | ||
| 189 | return -EINVAL; | ||
| 190 | |||
| 191 | /* get the ldu */ | ||
| 192 | crtc = set->crtc; | ||
| 193 | ldu = vmw_crtc_to_ldu(crtc); | ||
| 194 | vfb = set->fb ? vmw_framebuffer_to_vfb(set->fb) : NULL; | ||
| 195 | dev_priv = vmw_priv(crtc->dev); | ||
| 196 | |||
| 197 | if (set->num_connectors > 1) { | ||
| 198 | DRM_ERROR("to many connectors\n"); | ||
| 199 | return -EINVAL; | ||
| 200 | } | ||
| 201 | |||
| 202 | if (set->num_connectors == 1 && | ||
| 203 | set->connectors[0] != &ldu->base.connector) { | ||
| 204 | DRM_ERROR("connector doesn't match %p %p\n", | ||
| 205 | set->connectors[0], &ldu->base.connector); | ||
| 206 | return -EINVAL; | ||
| 207 | } | ||
| 208 | |||
| 209 | /* ldu only supports one fb active at a time */ | ||
| 210 | if (dev_priv->ldu_priv->fb && vfb && | ||
| 211 | dev_priv->ldu_priv->fb != vfb) { | ||
| 212 | DRM_ERROR("Multiple framebuffers not supported\n"); | ||
| 213 | return -EINVAL; | ||
| 214 | } | ||
| 215 | |||
| 216 | /* since they always map one to one, these are safe */ | ||
| 217 | connector = &ldu->base.connector; | ||
| 218 | encoder = &ldu->base.encoder; | ||
| 219 | |||
| 220 | /* should we turn the crtc off? */ | ||
| 221 | if (set->num_connectors == 0 || !set->mode || !set->fb) { | ||
| 222 | |||
| 223 | connector->encoder = NULL; | ||
| 224 | encoder->crtc = NULL; | ||
| 225 | crtc->fb = NULL; | ||
| 226 | |||
| 227 | vmw_ldu_del_active(dev_priv, ldu); | ||
| 228 | |||
| 229 | vmw_ldu_commit_list(dev_priv); | ||
| 230 | |||
| 231 | return 0; | ||
| 232 | } | ||
| 233 | |||
| 234 | |||
| 235 | /* we now know we want to set a mode */ | ||
| 236 | mode = set->mode; | ||
| 237 | fb = set->fb; | ||
| 238 | |||
| 239 | if (set->x + mode->hdisplay > fb->width || | ||
| 240 | set->y + mode->vdisplay > fb->height) { | ||
| 241 | DRM_ERROR("set outside of framebuffer\n"); | ||
| 242 | return -EINVAL; | ||
| 243 | } | ||
| 244 | |||
| 245 | vmw_fb_off(dev_priv); | ||
| 246 | |||
| 247 | crtc->fb = fb; | ||
| 248 | encoder->crtc = crtc; | ||
| 249 | connector->encoder = encoder; | ||
| 250 | crtc->x = set->x; | ||
| 251 | crtc->y = set->y; | ||
| 252 | crtc->mode = *mode; | ||
| 253 | |||
| 254 | vmw_ldu_add_active(dev_priv, ldu, vfb); | ||
| 255 | |||
| 256 | vmw_ldu_commit_list(dev_priv); | ||
| 257 | |||
| 258 | return 0; | ||
| 259 | } | ||
| 260 | |||
| 261 | static struct drm_crtc_funcs vmw_legacy_crtc_funcs = { | ||
| 262 | .save = vmw_ldu_crtc_save, | ||
| 263 | .restore = vmw_ldu_crtc_restore, | ||
| 264 | .cursor_set = vmw_du_crtc_cursor_set, | ||
| 265 | .cursor_move = vmw_du_crtc_cursor_move, | ||
| 266 | .gamma_set = vmw_ldu_crtc_gamma_set, | ||
| 267 | .destroy = vmw_ldu_crtc_destroy, | ||
| 268 | .set_config = vmw_ldu_crtc_set_config, | ||
| 269 | }; | ||
| 270 | |||
| 271 | /* | ||
| 272 | * Legacy Display Unit encoder functions | ||
| 273 | */ | ||
| 274 | |||
| 275 | static void vmw_ldu_encoder_destroy(struct drm_encoder *encoder) | ||
| 276 | { | ||
| 277 | vmw_ldu_destroy(vmw_encoder_to_ldu(encoder)); | ||
| 278 | } | ||
| 279 | |||
| 280 | static struct drm_encoder_funcs vmw_legacy_encoder_funcs = { | ||
| 281 | .destroy = vmw_ldu_encoder_destroy, | ||
| 282 | }; | ||
| 283 | |||
| 284 | /* | ||
| 285 | * Legacy Display Unit connector functions | ||
| 286 | */ | ||
| 287 | |||
| 288 | static void vmw_ldu_connector_dpms(struct drm_connector *connector, int mode) | ||
| 289 | { | ||
| 290 | } | ||
| 291 | |||
| 292 | static void vmw_ldu_connector_save(struct drm_connector *connector) | ||
| 293 | { | ||
| 294 | } | ||
| 295 | |||
| 296 | static void vmw_ldu_connector_restore(struct drm_connector *connector) | ||
| 297 | { | ||
| 298 | } | ||
| 299 | |||
| 300 | static enum drm_connector_status | ||
| 301 | vmw_ldu_connector_detect(struct drm_connector *connector) | ||
| 302 | { | ||
| 303 | /* XXX vmwctrl should control connection status */ | ||
| 304 | if (vmw_connector_to_ldu(connector)->base.unit == 0) | ||
| 305 | return connector_status_connected; | ||
| 306 | return connector_status_disconnected; | ||
| 307 | } | ||
| 308 | |||
| 309 | static struct drm_display_mode vmw_ldu_connector_builtin[] = { | ||
| 310 | /* 640x480@60Hz */ | ||
| 311 | { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656, | ||
| 312 | 752, 800, 0, 480, 489, 492, 525, 0, | ||
| 313 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, | ||
| 314 | /* 800x600@60Hz */ | ||
| 315 | { DRM_MODE("800x600", | ||
| 316 | DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, | ||
| 317 | 40000, 800, 840, 968, 1056, 0, 600, 601, 605, 628, | ||
| 318 | 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
| 319 | /* 1024x768@60Hz */ | ||
| 320 | { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048, | ||
| 321 | 1184, 1344, 0, 768, 771, 777, 806, 0, | ||
| 322 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, | ||
| 323 | /* 1152x864@75Hz */ | ||
| 324 | { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216, | ||
| 325 | 1344, 1600, 0, 864, 865, 868, 900, 0, | ||
| 326 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
| 327 | /* 1280x768@60Hz */ | ||
| 328 | { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344, | ||
| 329 | 1472, 1664, 0, 768, 771, 778, 798, 0, | ||
| 330 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
| 331 | /* 1280x800@60Hz */ | ||
| 332 | { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352, | ||
| 333 | 1480, 1680, 0, 800, 803, 809, 831, 0, | ||
| 334 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, | ||
| 335 | /* 1280x960@60Hz */ | ||
| 336 | { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376, | ||
| 337 | 1488, 1800, 0, 960, 961, 964, 1000, 0, | ||
| 338 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
| 339 | /* 1280x1024@60Hz */ | ||
| 340 | { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328, | ||
| 341 | 1440, 1688, 0, 1024, 1025, 1028, 1066, 0, | ||
| 342 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
| 343 | /* 1360x768@60Hz */ | ||
| 344 | { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424, | ||
| 345 | 1536, 1792, 0, 768, 771, 777, 795, 0, | ||
| 346 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
| 347 | /* 1400x1050@60Hz */ | ||
| 348 | { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488, | ||
| 349 | 1632, 1864, 0, 1050, 1053, 1057, 1089, 0, | ||
| 350 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
| 351 | /* 1440x900@60Hz */ | ||
| 352 | { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520, | ||
| 353 | 1672, 1904, 0, 900, 903, 909, 934, 0, | ||
| 354 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
| 355 | /* 1600x1200@60Hz */ | ||
| 356 | { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664, | ||
| 357 | 1856, 2160, 0, 1200, 1201, 1204, 1250, 0, | ||
| 358 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
| 359 | /* 1680x1050@60Hz */ | ||
| 360 | { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784, | ||
| 361 | 1960, 2240, 0, 1050, 1053, 1059, 1089, 0, | ||
| 362 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
| 363 | /* 1792x1344@60Hz */ | ||
| 364 | { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920, | ||
| 365 | 2120, 2448, 0, 1344, 1345, 1348, 1394, 0, | ||
| 366 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
| 367 | /* 1856x1392@60Hz */ | ||
| 368 | { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952, | ||
| 369 | 2176, 2528, 0, 1392, 1393, 1396, 1439, 0, | ||
| 370 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
| 371 | /* 1920x1200@60Hz */ | ||
| 372 | { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056, | ||
| 373 | 2256, 2592, 0, 1200, 1203, 1209, 1245, 0, | ||
| 374 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
| 375 | /* 1920x1440@60Hz */ | ||
| 376 | { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048, | ||
| 377 | 2256, 2600, 0, 1440, 1441, 1444, 1500, 0, | ||
| 378 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
| 379 | /* 2560x1600@60Hz */ | ||
| 380 | { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752, | ||
| 381 | 3032, 3504, 0, 1600, 1603, 1609, 1658, 0, | ||
| 382 | DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, | ||
| 383 | /* Terminate */ | ||
| 384 | { DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) }, | ||
| 385 | }; | ||
| 386 | |||
| 387 | static int vmw_ldu_connector_fill_modes(struct drm_connector *connector, | ||
| 388 | uint32_t max_width, uint32_t max_height) | ||
| 389 | { | ||
| 390 | struct drm_device *dev = connector->dev; | ||
| 391 | struct drm_display_mode *mode = NULL; | ||
| 392 | int i; | ||
| 393 | |||
| 394 | for (i = 0; vmw_ldu_connector_builtin[i].type != 0; i++) { | ||
| 395 | if (vmw_ldu_connector_builtin[i].hdisplay > max_width || | ||
| 396 | vmw_ldu_connector_builtin[i].vdisplay > max_height) | ||
| 397 | continue; | ||
| 398 | |||
| 399 | mode = drm_mode_duplicate(dev, &vmw_ldu_connector_builtin[i]); | ||
| 400 | if (!mode) | ||
| 401 | return 0; | ||
| 402 | mode->vrefresh = drm_mode_vrefresh(mode); | ||
| 403 | |||
| 404 | drm_mode_probed_add(connector, mode); | ||
| 405 | } | ||
| 406 | |||
| 407 | drm_mode_connector_list_update(connector); | ||
| 408 | |||
| 409 | return 1; | ||
| 410 | } | ||
| 411 | |||
| 412 | static int vmw_ldu_connector_set_property(struct drm_connector *connector, | ||
| 413 | struct drm_property *property, | ||
| 414 | uint64_t val) | ||
| 415 | { | ||
| 416 | return 0; | ||
| 417 | } | ||
| 418 | |||
| 419 | static void vmw_ldu_connector_destroy(struct drm_connector *connector) | ||
| 420 | { | ||
| 421 | vmw_ldu_destroy(vmw_connector_to_ldu(connector)); | ||
| 422 | } | ||
| 423 | |||
| 424 | static struct drm_connector_funcs vmw_legacy_connector_funcs = { | ||
| 425 | .dpms = vmw_ldu_connector_dpms, | ||
| 426 | .save = vmw_ldu_connector_save, | ||
| 427 | .restore = vmw_ldu_connector_restore, | ||
| 428 | .detect = vmw_ldu_connector_detect, | ||
| 429 | .fill_modes = vmw_ldu_connector_fill_modes, | ||
| 430 | .set_property = vmw_ldu_connector_set_property, | ||
| 431 | .destroy = vmw_ldu_connector_destroy, | ||
| 432 | }; | ||
| 433 | |||
| 434 | static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit) | ||
| 435 | { | ||
| 436 | struct vmw_legacy_display_unit *ldu; | ||
| 437 | struct drm_device *dev = dev_priv->dev; | ||
| 438 | struct drm_connector *connector; | ||
| 439 | struct drm_encoder *encoder; | ||
| 440 | struct drm_crtc *crtc; | ||
| 441 | |||
| 442 | ldu = kzalloc(sizeof(*ldu), GFP_KERNEL); | ||
| 443 | if (!ldu) | ||
| 444 | return -ENOMEM; | ||
| 445 | |||
| 446 | ldu->unit = unit; | ||
| 447 | crtc = &ldu->base.crtc; | ||
| 448 | encoder = &ldu->base.encoder; | ||
| 449 | connector = &ldu->base.connector; | ||
| 450 | |||
| 451 | drm_connector_init(dev, connector, &vmw_legacy_connector_funcs, | ||
| 452 | DRM_MODE_CONNECTOR_LVDS); | ||
| 453 | /* Initial status */ | ||
| 454 | if (unit == 0) | ||
| 455 | connector->status = connector_status_connected; | ||
| 456 | else | ||
| 457 | connector->status = connector_status_disconnected; | ||
| 458 | |||
| 459 | drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs, | ||
| 460 | DRM_MODE_ENCODER_LVDS); | ||
| 461 | drm_mode_connector_attach_encoder(connector, encoder); | ||
| 462 | encoder->possible_crtcs = (1 << unit); | ||
| 463 | encoder->possible_clones = 0; | ||
| 464 | |||
| 465 | INIT_LIST_HEAD(&ldu->active); | ||
| 466 | |||
| 467 | drm_crtc_init(dev, crtc, &vmw_legacy_crtc_funcs); | ||
| 468 | |||
| 469 | drm_connector_attach_property(connector, | ||
| 470 | dev->mode_config.dirty_info_property, | ||
| 471 | 1); | ||
| 472 | |||
| 473 | return 0; | ||
| 474 | } | ||
| 475 | |||
| 476 | int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv) | ||
| 477 | { | ||
| 478 | if (dev_priv->ldu_priv) { | ||
| 479 | DRM_INFO("ldu system already on\n"); | ||
| 480 | return -EINVAL; | ||
| 481 | } | ||
| 482 | |||
| 483 | dev_priv->ldu_priv = kmalloc(sizeof(*dev_priv->ldu_priv), GFP_KERNEL); | ||
| 484 | |||
| 485 | if (!dev_priv->ldu_priv) | ||
| 486 | return -ENOMEM; | ||
| 487 | |||
| 488 | INIT_LIST_HEAD(&dev_priv->ldu_priv->active); | ||
| 489 | dev_priv->ldu_priv->num_active = 0; | ||
| 490 | dev_priv->ldu_priv->fb = NULL; | ||
| 491 | |||
| 492 | drm_mode_create_dirty_info_property(dev_priv->dev); | ||
| 493 | |||
| 494 | vmw_ldu_init(dev_priv, 0); | ||
| 495 | vmw_ldu_init(dev_priv, 1); | ||
| 496 | vmw_ldu_init(dev_priv, 2); | ||
| 497 | vmw_ldu_init(dev_priv, 3); | ||
| 498 | vmw_ldu_init(dev_priv, 4); | ||
| 499 | vmw_ldu_init(dev_priv, 5); | ||
| 500 | vmw_ldu_init(dev_priv, 6); | ||
| 501 | vmw_ldu_init(dev_priv, 7); | ||
| 502 | |||
| 503 | return 0; | ||
| 504 | } | ||
| 505 | |||
| 506 | int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv) | ||
| 507 | { | ||
| 508 | if (!dev_priv->ldu_priv) | ||
| 509 | return -ENOSYS; | ||
| 510 | |||
| 511 | BUG_ON(!list_empty(&dev_priv->ldu_priv->active)); | ||
| 512 | |||
| 513 | kfree(dev_priv->ldu_priv); | ||
| 514 | |||
| 515 | return 0; | ||
| 516 | } | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c new file mode 100644 index 000000000000..bb6e6a096d25 --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c | |||
| @@ -0,0 +1,634 @@ | |||
| 1 | /************************************************************************** | ||
| 2 | * | ||
| 3 | * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA | ||
| 4 | * All Rights Reserved. | ||
| 5 | * | ||
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 7 | * copy of this software and associated documentation files (the | ||
| 8 | * "Software"), to deal in the Software without restriction, including | ||
| 9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
| 10 | * distribute, sub license, and/or sell copies of the Software, and to | ||
| 11 | * permit persons to whom the Software is furnished to do so, subject to | ||
| 12 | * the following conditions: | ||
| 13 | * | ||
| 14 | * The above copyright notice and this permission notice (including the | ||
| 15 | * next paragraph) shall be included in all copies or substantial portions | ||
| 16 | * of the Software. | ||
| 17 | * | ||
| 18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
| 21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
| 22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
| 23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
| 24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
| 25 | * | ||
| 26 | **************************************************************************/ | ||
| 27 | |||
| 28 | |||
| 29 | #include "drmP.h" | ||
| 30 | #include "vmwgfx_drv.h" | ||
| 31 | |||
| 32 | #include "ttm/ttm_placement.h" | ||
| 33 | |||
| 34 | #include "svga_overlay.h" | ||
| 35 | #include "svga_escape.h" | ||
| 36 | |||
| 37 | #define VMW_MAX_NUM_STREAMS 1 | ||
| 38 | |||
| 39 | struct vmw_stream { | ||
| 40 | struct vmw_dma_buffer *buf; | ||
| 41 | bool claimed; | ||
| 42 | bool paused; | ||
| 43 | struct drm_vmw_control_stream_arg saved; | ||
| 44 | }; | ||
| 45 | |||
| 46 | /** | ||
| 47 | * Overlay control | ||
| 48 | */ | ||
| 49 | struct vmw_overlay { | ||
| 50 | /* | ||
| 51 | * Each stream is a single overlay. In Xv these are called ports. | ||
| 52 | */ | ||
| 53 | struct mutex mutex; | ||
| 54 | struct vmw_stream stream[VMW_MAX_NUM_STREAMS]; | ||
| 55 | }; | ||
| 56 | |||
| 57 | static inline struct vmw_overlay *vmw_overlay(struct drm_device *dev) | ||
| 58 | { | ||
| 59 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
| 60 | return dev_priv ? dev_priv->overlay_priv : NULL; | ||
| 61 | } | ||
| 62 | |||
| 63 | struct vmw_escape_header { | ||
| 64 | uint32_t cmd; | ||
| 65 | SVGAFifoCmdEscape body; | ||
| 66 | }; | ||
| 67 | |||
| 68 | struct vmw_escape_video_flush { | ||
| 69 | struct vmw_escape_header escape; | ||
| 70 | SVGAEscapeVideoFlush flush; | ||
| 71 | }; | ||
| 72 | |||
| 73 | static inline void fill_escape(struct vmw_escape_header *header, | ||
| 74 | uint32_t size) | ||
| 75 | { | ||
| 76 | header->cmd = SVGA_CMD_ESCAPE; | ||
| 77 | header->body.nsid = SVGA_ESCAPE_NSID_VMWARE; | ||
| 78 | header->body.size = size; | ||
| 79 | } | ||
| 80 | |||
| 81 | static inline void fill_flush(struct vmw_escape_video_flush *cmd, | ||
| 82 | uint32_t stream_id) | ||
| 83 | { | ||
| 84 | fill_escape(&cmd->escape, sizeof(cmd->flush)); | ||
| 85 | cmd->flush.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_FLUSH; | ||
| 86 | cmd->flush.streamId = stream_id; | ||
| 87 | } | ||
| 88 | |||
| 89 | /** | ||
| 90 | * Pin or unpin a buffer in vram. | ||
| 91 | * | ||
| 92 | * @dev_priv: Driver private. | ||
| 93 | * @buf: DMA buffer to pin or unpin. | ||
| 94 | * @pin: Pin buffer in vram if true. | ||
| 95 | * @interruptible: Use interruptible wait. | ||
| 96 | * | ||
| 97 | * Takes the current master's ttm lock in read mode. | ||
| 98 | * | ||
| 99 | * Returns | ||
| 100 | * -ERESTARTSYS if interrupted by a signal. | ||
| 101 | */ | ||
| 102 | static int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv, | ||
| 103 | struct vmw_dma_buffer *buf, | ||
| 104 | bool pin, bool interruptible) | ||
| 105 | { | ||
| 106 | struct ttm_buffer_object *bo = &buf->base; | ||
| 107 | struct ttm_bo_global *glob = bo->glob; | ||
| 108 | struct ttm_placement *overlay_placement = &vmw_vram_placement; | ||
| 109 | int ret; | ||
| 110 | |||
| 111 | ret = ttm_read_lock(&dev_priv->active_master->lock, interruptible); | ||
| 112 | if (unlikely(ret != 0)) | ||
| 113 | return ret; | ||
| 114 | |||
| 115 | ret = ttm_bo_reserve(bo, interruptible, false, false, 0); | ||
| 116 | if (unlikely(ret != 0)) | ||
| 117 | goto err; | ||
| 118 | |||
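| | /* Release any GMR binding before moving the buffer. */ | ||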
| 119 | if (buf->gmr_bound) { | ||
| 120 | vmw_gmr_unbind(dev_priv, buf->gmr_id); | ||
| 121 | spin_lock(&glob->lru_lock); | ||
| 122 | ida_remove(&dev_priv->gmr_ida, buf->gmr_id); | ||
| 123 | spin_unlock(&glob->lru_lock); | ||
| 124 | buf->gmr_bound = false; | ||
| 125 | } | ||
| 126 | |||
| 127 | if (pin) | ||
| 128 | overlay_placement = &vmw_vram_ne_placement; | ||
| 129 | |||
| 130 | ret = ttm_bo_validate(bo, overlay_placement, interruptible, false); | ||
| 131 | |||
| 132 | ttm_bo_unreserve(bo); | ||
| 133 | |||
| 134 | err: | ||
| 135 | ttm_read_unlock(&dev_priv->active_master->lock); | ||
| 136 | |||
| 137 | return ret; | ||
| 138 | } | ||
| 139 | |||
| 140 | /** | ||
| 141 | * Send put command to hw. | ||
| 142 | * | ||
| 143 | * Returns | ||
| 144 | * -ERESTARTSYS if interrupted by a signal. | ||
| 145 | */ | ||
| 146 | static int vmw_overlay_send_put(struct vmw_private *dev_priv, | ||
| 147 | struct vmw_dma_buffer *buf, | ||
| 148 | struct drm_vmw_control_stream_arg *arg, | ||
| 149 | bool interruptible) | ||
| 150 | { | ||
| 151 | struct { | ||
| 152 | struct vmw_escape_header escape; | ||
| 153 | struct { | ||
| 154 | struct { | ||
| 155 | uint32_t cmdType; | ||
| 156 | uint32_t streamId; | ||
| 157 | } header; | ||
| 158 | struct { | ||
| 159 | uint32_t registerId; | ||
| 160 | uint32_t value; | ||
| 161 | } items[SVGA_VIDEO_PITCH_3 + 1]; | ||
| 162 | } body; | ||
| 163 | struct vmw_escape_video_flush flush; | ||
| 164 | } *cmds; | ||
| 165 | uint32_t offset; | ||
| 166 | int i, ret; | ||
| 167 | |||
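| | /* Retry the fifo reserve until space is available or the wait is interrupted. */ | ||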
| 168 | for (;;) { | ||
| 169 | cmds = vmw_fifo_reserve(dev_priv, sizeof(*cmds)); | ||
| 170 | if (cmds) | ||
| 171 | break; | ||
| 172 | |||
| 173 | ret = vmw_fallback_wait(dev_priv, false, true, 0, | ||
| 174 | interruptible, 3*HZ); | ||
| 175 | if (interruptible && ret == -ERESTARTSYS) | ||
| 176 | return ret; | ||
| 177 | else | ||
| 178 | BUG_ON(ret != 0); | ||
| 179 | } | ||
| 180 | |||
| 181 | fill_escape(&cmds->escape, sizeof(cmds->body)); | ||
| 182 | cmds->body.header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS; | ||
| 183 | cmds->body.header.streamId = arg->stream_id; | ||
| 184 | |||
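| | /* The loop relies on the video register ids running contiguously from 0 up to SVGA_VIDEO_PITCH_3. */ | ||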
| 185 | for (i = 0; i <= SVGA_VIDEO_PITCH_3; i++) | ||
| 186 | cmds->body.items[i].registerId = i; | ||
| 187 | |||
| 188 | offset = buf->base.offset + arg->offset; | ||
| 189 | |||
| 190 | cmds->body.items[SVGA_VIDEO_ENABLED].value = true; | ||
| 191 | cmds->body.items[SVGA_VIDEO_FLAGS].value = arg->flags; | ||
| 192 | cmds->body.items[SVGA_VIDEO_DATA_OFFSET].value = offset; | ||
| 193 | cmds->body.items[SVGA_VIDEO_FORMAT].value = arg->format; | ||
| 194 | cmds->body.items[SVGA_VIDEO_COLORKEY].value = arg->color_key; | ||
| 195 | cmds->body.items[SVGA_VIDEO_SIZE].value = arg->size; | ||
| 196 | cmds->body.items[SVGA_VIDEO_WIDTH].value = arg->width; | ||
| 197 | cmds->body.items[SVGA_VIDEO_HEIGHT].value = arg->height; | ||
| 198 | cmds->body.items[SVGA_VIDEO_SRC_X].value = arg->src.x; | ||
| 199 | cmds->body.items[SVGA_VIDEO_SRC_Y].value = arg->src.y; | ||
| 200 | cmds->body.items[SVGA_VIDEO_SRC_WIDTH].value = arg->src.w; | ||
| 201 | cmds->body.items[SVGA_VIDEO_SRC_HEIGHT].value = arg->src.h; | ||
| 202 | cmds->body.items[SVGA_VIDEO_DST_X].value = arg->dst.x; | ||
| 203 | cmds->body.items[SVGA_VIDEO_DST_Y].value = arg->dst.y; | ||
| 204 | cmds->body.items[SVGA_VIDEO_DST_WIDTH].value = arg->dst.w; | ||
| 205 | cmds->body.items[SVGA_VIDEO_DST_HEIGHT].value = arg->dst.h; | ||
| 206 | cmds->body.items[SVGA_VIDEO_PITCH_1].value = arg->pitch[0]; | ||
| 207 | cmds->body.items[SVGA_VIDEO_PITCH_2].value = arg->pitch[1]; | ||
| 208 | cmds->body.items[SVGA_VIDEO_PITCH_3].value = arg->pitch[2]; | ||
| 209 | |||
| 210 | fill_flush(&cmds->flush, arg->stream_id); | ||
| 211 | |||
| 212 | vmw_fifo_commit(dev_priv, sizeof(*cmds)); | ||
| 213 | |||
| 214 | return 0; | ||
| 215 | } | ||
| 216 | |||
| 217 | /** | ||
| 218 | * Send stop command to hw. | ||
| 219 | * | ||
| 220 | * Returns | ||
| 221 | * -ERESTARTSYS if interrupted by a signal. | ||
| 222 | */ | ||
| 223 | static int vmw_overlay_send_stop(struct vmw_private *dev_priv, | ||
| 224 | uint32_t stream_id, | ||
| 225 | bool interruptible) | ||
| 226 | { | ||
| 227 | struct { | ||
| 228 | struct vmw_escape_header escape; | ||
| 229 | SVGAEscapeVideoSetRegs body; | ||
| 230 | struct vmw_escape_video_flush flush; | ||
| 231 | } *cmds; | ||
| 232 | int ret; | ||
| 233 | |||
| 234 | for (;;) { | ||
| 235 | cmds = vmw_fifo_reserve(dev_priv, sizeof(*cmds)); | ||
| 236 | if (cmds) | ||
| 237 | break; | ||
| 238 | |||
| 239 | ret = vmw_fallback_wait(dev_priv, false, true, 0, | ||
| 240 | interruptible, 3*HZ); | ||
| 241 | if (interruptible && ret == -ERESTARTSYS) | ||
| 242 | return ret; | ||
| 243 | else | ||
| 244 | BUG_ON(ret != 0); | ||
| 245 | } | ||
| 246 | |||
| 247 | fill_escape(&cmds->escape, sizeof(cmds->body)); | ||
| 248 | cmds->body.header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS; | ||
| 249 | cmds->body.header.streamId = stream_id; | ||
| 250 | cmds->body.items[0].registerId = SVGA_VIDEO_ENABLED; | ||
| 251 | cmds->body.items[0].value = false; | ||
| 252 | fill_flush(&cmds->flush, stream_id); | ||
| 253 | |||
| 254 | vmw_fifo_commit(dev_priv, sizeof(*cmds)); | ||
| 255 | |||
| 256 | return 0; | ||
| 257 | } | ||
| 258 | |||
| 259 | /** | ||
| 260 | * Stop or pause a stream. | ||
| 261 | * | ||
| 262 | * If the stream is paused, the NO_EVICT flag is removed from the | ||
| 263 | * buffer, but the buffer is left in vram. This allows for instance | ||
| 264 | * mode_set to evict it should it need to. | ||
| 265 | * | ||
| 266 | * The caller must hold the overlay lock. | ||
| 267 | * | ||
| 268 | * @stream_id which stream to stop/pause. | ||
| 269 | * @pause true to pause, false to stop completely. | ||
| 270 | */ | ||
| 271 | static int vmw_overlay_stop(struct vmw_private *dev_priv, | ||
| 272 | uint32_t stream_id, bool pause, | ||
| 273 | bool interruptible) | ||
| 274 | { | ||
| 275 | struct vmw_overlay *overlay = dev_priv->overlay_priv; | ||
| 276 | struct vmw_stream *stream = &overlay->stream[stream_id]; | ||
| 277 | int ret; | ||
| 278 | |||
| 279 | /* no buffer attached, so the stream is completely stopped */ | ||
| 280 | if (!stream->buf) | ||
| 281 | return 0; | ||
| 282 | |||
| 283 | /* If the stream is paused this is already done */ | ||
| 284 | if (!stream->paused) { | ||
| 285 | ret = vmw_overlay_send_stop(dev_priv, stream_id, | ||
| 286 | interruptible); | ||
| 287 | if (ret) | ||
| 288 | return ret; | ||
| 289 | |||
| 290 | /* We only remove the NO_EVICT flag, so this cannot fail with -ENOMEM */ | ||
| 291 | ret = vmw_dmabuf_pin_in_vram(dev_priv, stream->buf, false, | ||
| 292 | interruptible); | ||
| 293 | if (interruptible && ret == -ERESTARTSYS) | ||
| 294 | return ret; | ||
| 295 | else | ||
| 296 | BUG_ON(ret != 0); | ||
| 297 | } | ||
| 298 | |||
| 299 | if (!pause) { | ||
| 300 | vmw_dmabuf_unreference(&stream->buf); | ||
| 301 | stream->paused = false; | ||
| 302 | } else { | ||
| 303 | stream->paused = true; | ||
| 304 | } | ||
| 305 | |||
| 306 | return 0; | ||
| 307 | } | ||
| 308 | |||
| 309 | /** | ||
| 310 | * Update a stream and send any put or stop fifo commands needed. | ||
| 311 | * | ||
| 312 | * The caller must hold the overlay lock. | ||
| 313 | * | ||
| 314 | * Returns | ||
| 315 | * -ENOMEM if buffer doesn't fit in vram. | ||
| 316 | * -ERESTARTSYS if interrupted. | ||
| 317 | */ | ||
| 318 | static int vmw_overlay_update_stream(struct vmw_private *dev_priv, | ||
| 319 | struct vmw_dma_buffer *buf, | ||
| 320 | struct drm_vmw_control_stream_arg *arg, | ||
| 321 | bool interruptible) | ||
| 322 | { | ||
| 323 | struct vmw_overlay *overlay = dev_priv->overlay_priv; | ||
| 324 | struct vmw_stream *stream = &overlay->stream[arg->stream_id]; | ||
| 325 | int ret = 0; | ||
| 326 | |||
| 327 | if (!buf) | ||
| 328 | return -EINVAL; | ||
| 329 | |||
| 330 | DRM_DEBUG(" %s: old %p, new %p, %spaused\n", __func__, | ||
| 331 | stream->buf, buf, stream->paused ? "" : "not "); | ||
| 332 | |||
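| | /* A different buffer is being attached; stop the old stream before switching. */ | ||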
| 333 | if (stream->buf != buf) { | ||
| 334 | ret = vmw_overlay_stop(dev_priv, arg->stream_id, | ||
| 335 | false, interruptible); | ||
| 336 | if (ret) | ||
| 337 | return ret; | ||
| 338 | } else if (!stream->paused) { | ||
| 339 | /* If the buffers match and the stream is not paused, just send | ||
| 340 | * the put command; no need to do anything else. | ||
| 341 | */ | ||
| 342 | ret = vmw_overlay_send_put(dev_priv, buf, arg, interruptible); | ||
| 343 | if (ret == 0) | ||
| 344 | stream->saved = *arg; | ||
| 345 | else | ||
| 346 | BUG_ON(!interruptible); | ||
| 347 | |||
| 348 | return ret; | ||
| 349 | } | ||
| 350 | |||
| 351 | /* We don't start the old stream if we are interrupted. | ||
| 352 | * Might return -ENOMEM if it can't fit the buffer in vram. | ||
| 353 | */ | ||
| 354 | ret = vmw_dmabuf_pin_in_vram(dev_priv, buf, true, interruptible); | ||
| 355 | if (ret) | ||
| 356 | return ret; | ||
| 357 | |||
| 358 | ret = vmw_overlay_send_put(dev_priv, buf, arg, interruptible); | ||
| 359 | if (ret) { | ||
| 360 | /* This one needs to happen no matter what. We only remove | ||
| 361 | * the NO_EVICT flag so this is safe from -ENOMEM. | ||
| 362 | */ | ||
| 363 | BUG_ON(vmw_dmabuf_pin_in_vram(dev_priv, buf, false, false) != 0); | ||
| 364 | return ret; | ||
| 365 | } | ||
| 366 | |||
| 367 | if (stream->buf != buf) | ||
| 368 | stream->buf = vmw_dmabuf_reference(buf); | ||
| 369 | stream->saved = *arg; | ||
| 370 | |||
| 371 | return 0; | ||
| 372 | } | ||
| 373 | |||
| 374 | /** | ||
| 375 | * Stop all streams. | ||
| 376 | * | ||
| 377 | * Used by the fb code when starting. | ||
| 378 | * | ||
| 379 | * Takes the overlay lock. | ||
| 380 | */ | ||
| 381 | int vmw_overlay_stop_all(struct vmw_private *dev_priv) | ||
| 382 | { | ||
| 383 | struct vmw_overlay *overlay = dev_priv->overlay_priv; | ||
| 384 | int i, ret; | ||
| 385 | |||
| 386 | if (!overlay) | ||
| 387 | return 0; | ||
| 388 | |||
| 389 | mutex_lock(&overlay->mutex); | ||
| 390 | |||
| 391 | for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) { | ||
| 392 | struct vmw_stream *stream = &overlay->stream[i]; | ||
| 393 | if (!stream->buf) | ||
| 394 | continue; | ||
| 395 | |||
| 396 | ret = vmw_overlay_stop(dev_priv, i, false, false); | ||
| 397 | WARN_ON(ret != 0); | ||
| 398 | } | ||
| 399 | |||
| 400 | mutex_unlock(&overlay->mutex); | ||
| 401 | |||
| 402 | return 0; | ||
| 403 | } | ||
| 404 | |||
| 405 | /** | ||
| 406 | * Try to resume all paused streams. | ||
| 407 | * | ||
| 408 | * Used by the kms code after moving a new scanout buffer to vram. | ||
| 409 | * | ||
| 410 | * Takes the overlay lock. | ||
| 411 | */ | ||
| 412 | int vmw_overlay_resume_all(struct vmw_private *dev_priv) | ||
| 413 | { | ||
| 414 | struct vmw_overlay *overlay = dev_priv->overlay_priv; | ||
| 415 | int i, ret; | ||
| 416 | |||
| 417 | if (!overlay) | ||
| 418 | return 0; | ||
| 419 | |||
| 420 | mutex_lock(&overlay->mutex); | ||
| 421 | |||
| 422 | for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) { | ||
| 423 | struct vmw_stream *stream = &overlay->stream[i]; | ||
| 424 | if (!stream->paused) | ||
| 425 | continue; | ||
| 426 | |||
| 427 | ret = vmw_overlay_update_stream(dev_priv, stream->buf, | ||
| 428 | &stream->saved, false); | ||
| 429 | if (ret != 0) | ||
| 430 | DRM_INFO("%s: *warning* failed to resume stream %i\n", | ||
| 431 | __func__, i); | ||
| 432 | } | ||
| 433 | |||
| 434 | mutex_unlock(&overlay->mutex); | ||
| 435 | |||
| 436 | return 0; | ||
| 437 | } | ||
| 438 | |||
| 439 | /** | ||
| 440 | * Pauses all active streams. | ||
| 441 | * | ||
| 442 | * Used by the kms code when moving a new scanout buffer to vram. | ||
| 443 | * | ||
| 444 | * Takes the overlay lock. | ||
| 445 | */ | ||
| 446 | int vmw_overlay_pause_all(struct vmw_private *dev_priv) | ||
| 447 | { | ||
| 448 | struct vmw_overlay *overlay = dev_priv->overlay_priv; | ||
| 449 | int i, ret; | ||
| 450 | |||
| 451 | if (!overlay) | ||
| 452 | return 0; | ||
| 453 | |||
| 454 | mutex_lock(&overlay->mutex); | ||
| 455 | |||
| 456 | for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) { | ||
| 457 | if (overlay->stream[i].paused) | ||
| 458 | DRM_INFO("%s: *warning* stream %i already paused\n", | ||
| 459 | __func__, i); | ||
| 460 | ret = vmw_overlay_stop(dev_priv, i, true, false); | ||
| 461 | WARN_ON(ret != 0); | ||
| 462 | } | ||
| 463 | |||
| 464 | mutex_unlock(&overlay->mutex); | ||
| 465 | |||
| 466 | return 0; | ||
| 467 | } | ||
| 468 | |||
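| | /** | ||
| | * vmw_overlay_ioctl - user-space entry point for controlling a stream. | ||
| | * | ||
| | * Looks up the stream resource and, unless the stream is being | ||
| | * disabled, the backing buffer, then stops or updates the stream | ||
| | * under the overlay mutex. | ||
| | */ | ||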
| 469 | int vmw_overlay_ioctl(struct drm_device *dev, void *data, | ||
| 470 | struct drm_file *file_priv) | ||
| 471 | { | ||
| 472 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | ||
| 473 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
| 474 | struct vmw_overlay *overlay = dev_priv->overlay_priv; | ||
| 475 | struct drm_vmw_control_stream_arg *arg = | ||
| 476 | (struct drm_vmw_control_stream_arg *)data; | ||
| 477 | struct vmw_dma_buffer *buf; | ||
| 478 | struct vmw_resource *res; | ||
| 479 | int ret; | ||
| 480 | |||
| 481 | if (!overlay) | ||
| 482 | return -ENOSYS; | ||
| 483 | |||
| 484 | ret = vmw_user_stream_lookup(dev_priv, tfile, &arg->stream_id, &res); | ||
| 485 | if (ret) | ||
| 486 | return ret; | ||
| 487 | |||
| 488 | mutex_lock(&overlay->mutex); | ||
| 489 | |||
| 490 | if (!arg->enabled) { | ||
| 491 | ret = vmw_overlay_stop(dev_priv, arg->stream_id, false, true); | ||
| 492 | goto out_unlock; | ||
| 493 | } | ||
| 494 | |||
| 495 | ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf); | ||
| 496 | if (ret) | ||
| 497 | goto out_unlock; | ||
| 498 | |||
| 499 | ret = vmw_overlay_update_stream(dev_priv, buf, arg, true); | ||
| 500 | |||
| 501 | vmw_dmabuf_unreference(&buf); | ||
| 502 | |||
| 503 | out_unlock: | ||
| 504 | mutex_unlock(&overlay->mutex); | ||
| 505 | vmw_resource_unreference(&res); | ||
| 506 | |||
| 507 | return ret; | ||
| 508 | } | ||
| 509 | |||
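| | /** | ||
| | * Report the number of overlay units, or zero if the overlay system | ||
| | * was never initialized. | ||
| | */ | ||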
| 510 | int vmw_overlay_num_overlays(struct vmw_private *dev_priv) | ||
| 511 | { | ||
| 512 | if (!dev_priv->overlay_priv) | ||
| 513 | return 0; | ||
| 514 | |||
| 515 | return VMW_MAX_NUM_STREAMS; | ||
| 516 | } | ||
| 517 | |||
| 518 | int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv) | ||
| 519 | { | ||
| 520 | struct vmw_overlay *overlay = dev_priv->overlay_priv; | ||
| 521 | int i, k; | ||
| 522 | |||
| 523 | if (!overlay) | ||
| 524 | return 0; | ||
| 525 | |||
| 526 | mutex_lock(&overlay->mutex); | ||
| 527 | |||
| 528 | for (i = 0, k = 0; i < VMW_MAX_NUM_STREAMS; i++) | ||
| 529 | if (!overlay->stream[i].claimed) | ||
| 530 | k++; | ||
| 531 | |||
| 532 | mutex_unlock(&overlay->mutex); | ||
| 533 | |||
| 534 | return k; | ||
| 535 | } | ||
| 536 | |||
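| | /** | ||
| | * Claim a free stream and return its index in @out. Returns -ESRCH | ||
| | * when all streams are already claimed. | ||
| | */ | ||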
| 537 | int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out) | ||
| 538 | { | ||
| 539 | struct vmw_overlay *overlay = dev_priv->overlay_priv; | ||
| 540 | int i; | ||
| 541 | |||
| 542 | if (!overlay) | ||
| 543 | return -ENOSYS; | ||
| 544 | |||
| 545 | mutex_lock(&overlay->mutex); | ||
| 546 | |||
| 547 | for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) { | ||
| 548 | |||
| 549 | if (overlay->stream[i].claimed) | ||
| 550 | continue; | ||
| 551 | |||
| 552 | overlay->stream[i].claimed = true; | ||
| 553 | *out = i; | ||
| 554 | mutex_unlock(&overlay->mutex); | ||
| 555 | return 0; | ||
| 556 | } | ||
| 557 | |||
| 558 | mutex_unlock(&overlay->mutex); | ||
| 559 | return -ESRCH; | ||
| 560 | } | ||
| 561 | |||
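| | /** | ||
| | * Stop the stream and mark it unclaimed so that it can be handed out | ||
| | * again by vmw_overlay_claim(). | ||
| | */ | ||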
| 562 | int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id) | ||
| 563 | { | ||
| 564 | struct vmw_overlay *overlay = dev_priv->overlay_priv; | ||
| 565 | |||
| 566 | BUG_ON(stream_id >= VMW_MAX_NUM_STREAMS); | ||
| 567 | |||
| 568 | if (!overlay) | ||
| 569 | return -ENOSYS; | ||
| 570 | |||
| 571 | mutex_lock(&overlay->mutex); | ||
| 572 | |||
| 573 | WARN_ON(!overlay->stream[stream_id].claimed); | ||
| 574 | vmw_overlay_stop(dev_priv, stream_id, false, false); | ||
| 575 | overlay->stream[stream_id].claimed = false; | ||
| 576 | |||
| 577 | mutex_unlock(&overlay->mutex); | ||
| 578 | return 0; | ||
| 579 | } | ||
| 580 | |||
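| | /** | ||
| | * Check the FIFO capability bits and allocate the zeroed per-device | ||
| | * overlay state. | ||
| | */ | ||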
| 581 | int vmw_overlay_init(struct vmw_private *dev_priv) | ||
| 582 | { | ||
| 583 | struct vmw_overlay *overlay; | ||
| 584 | int i; | ||
| 585 | |||
| 586 | if (dev_priv->overlay_priv) | ||
| 587 | return -EINVAL; | ||
| 588 | |||
| 589 | if (!(dev_priv->fifo.capabilities & SVGA_FIFO_CAP_VIDEO) && | ||
| 590 | (dev_priv->fifo.capabilities & SVGA_FIFO_CAP_ESCAPE)) { | ||
| 591 | DRM_INFO("hardware doesn't support overlays\n"); | ||
| 592 | return -ENOSYS; | ||
| 593 | } | ||
| 594 | |||
| 595 | overlay = kmalloc(sizeof(*overlay), GFP_KERNEL); | ||
| 596 | if (!overlay) | ||
| 597 | return -ENOMEM; | ||
| 598 | |||
| 599 | memset(overlay, 0, sizeof(*overlay)); | ||
| 600 | mutex_init(&overlay->mutex); | ||
| 601 | for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) { | ||
| 602 | overlay->stream[i].buf = NULL; | ||
| 603 | overlay->stream[i].paused = false; | ||
| 604 | overlay->stream[i].claimed = false; | ||
| 605 | } | ||
| 606 | |||
| 607 | dev_priv->overlay_priv = overlay; | ||
| 608 | |||
| 609 | return 0; | ||
| 610 | } | ||
| 611 | |||
| 612 | int vmw_overlay_close(struct vmw_private *dev_priv) | ||
| 613 | { | ||
| 614 | struct vmw_overlay *overlay = dev_priv->overlay_priv; | ||
| 615 | bool forgotten_buffer = false; | ||
| 616 | int i; | ||
| 617 | |||
| 618 | if (!overlay) | ||
| 619 | return -ENOSYS; | ||
| 620 | |||
| 621 | for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) { | ||
| 622 | if (overlay->stream[i].buf) { | ||
| 623 | forgotten_buffer = true; | ||
| 624 | vmw_overlay_stop(dev_priv, i, false, false); | ||
| 625 | } | ||
| 626 | } | ||
| 627 | |||
| 628 | WARN_ON(forgotten_buffer); | ||
| 629 | |||
| 630 | dev_priv->overlay_priv = NULL; | ||
| 631 | kfree(overlay); | ||
| 632 | |||
| 633 | return 0; | ||
| 634 | } | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h b/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h new file mode 100644 index 000000000000..9d0dd3a342eb --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h | |||
| @@ -0,0 +1,57 @@ | |||
| 1 | /************************************************************************** | ||
| 2 | * | ||
| 3 | * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA | ||
| 4 | * All Rights Reserved. | ||
| 5 | * | ||
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 7 | * copy of this software and associated documentation files (the | ||
| 8 | * "Software"), to deal in the Software without restriction, including | ||
| 9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
| 10 | * distribute, sub license, and/or sell copies of the Software, and to | ||
| 11 | * permit persons to whom the Software is furnished to do so, subject to | ||
| 12 | * the following conditions: | ||
| 13 | * | ||
| 14 | * The above copyright notice and this permission notice (including the | ||
| 15 | * next paragraph) shall be included in all copies or substantial portions | ||
| 16 | * of the Software. | ||
| 17 | * | ||
| 18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
| 21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
| 22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
| 23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
| 24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
| 25 | * | ||
| 26 | **************************************************************************/ | ||
| 27 | |||
| 28 | /** | ||
| 29 | * This file contains virtual hardware defines for kernel space. | ||
| 30 | */ | ||
| 31 | |||
| 32 | #ifndef _VMWGFX_REG_H_ | ||
| 33 | #define _VMWGFX_REG_H_ | ||
| 34 | |||
| 35 | #include <linux/types.h> | ||
| 36 | |||
| 37 | #define VMWGFX_INDEX_PORT 0x0 | ||
| 38 | #define VMWGFX_VALUE_PORT 0x1 | ||
| 39 | #define VMWGFX_IRQSTATUS_PORT 0x8 | ||
| 40 | |||
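| | /* | ||
| | * Describes a contiguous run of guest pages: a starting page frame | ||
| | * number and a page count. Presumably consumed by the device when | ||
| | * guest memory regions (GMRs) are bound. | ||
| | */ | ||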
| 41 | struct svga_guest_mem_descriptor { | ||
| 42 | __le32 ppn; | ||
| 43 | __le32 num_pages; | ||
| 44 | }; | ||
| 45 | |||
| 46 | struct svga_fifo_cmd_fence { | ||
| 47 | __le32 fence; | ||
| 48 | }; | ||
| 49 | |||
| 50 | #define SVGA_SYNC_GENERIC 1 | ||
| 51 | #define SVGA_SYNC_FIFOFULL 2 | ||
| 52 | |||
| 53 | #include "svga_types.h" | ||
| 54 | |||
| 55 | #include "svga3d_reg.h" | ||
| 56 | |||
| 57 | #endif | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c new file mode 100644 index 000000000000..a1ceed0c8e07 --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | |||
| @@ -0,0 +1,1192 @@ | |||
| 1 | /************************************************************************** | ||
| 2 | * | ||
| 3 | * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA | ||
| 4 | * All Rights Reserved. | ||
| 5 | * | ||
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 7 | * copy of this software and associated documentation files (the | ||
| 8 | * "Software"), to deal in the Software without restriction, including | ||
| 9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
| 10 | * distribute, sub license, and/or sell copies of the Software, and to | ||
| 11 | * permit persons to whom the Software is furnished to do so, subject to | ||
| 12 | * the following conditions: | ||
| 13 | * | ||
| 14 | * The above copyright notice and this permission notice (including the | ||
| 15 | * next paragraph) shall be included in all copies or substantial portions | ||
| 16 | * of the Software. | ||
| 17 | * | ||
| 18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
| 21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
| 22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
| 23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
| 24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
| 25 | * | ||
| 26 | **************************************************************************/ | ||
| 27 | |||
| 28 | #include "vmwgfx_drv.h" | ||
| 29 | #include "vmwgfx_drm.h" | ||
| 30 | #include "ttm/ttm_object.h" | ||
| 31 | #include "ttm/ttm_placement.h" | ||
| 32 | #include "drmP.h" | ||
| 33 | |||
| 34 | #define VMW_RES_CONTEXT ttm_driver_type0 | ||
| 35 | #define VMW_RES_SURFACE ttm_driver_type1 | ||
| 36 | #define VMW_RES_STREAM ttm_driver_type2 | ||
| 37 | |||
| 38 | struct vmw_user_context { | ||
| 39 | struct ttm_base_object base; | ||
| 40 | struct vmw_resource res; | ||
| 41 | }; | ||
| 42 | |||
| 43 | struct vmw_user_surface { | ||
| 44 | struct ttm_base_object base; | ||
| 45 | struct vmw_surface srf; | ||
| 46 | }; | ||
| 47 | |||
| 48 | struct vmw_user_dma_buffer { | ||
| 49 | struct ttm_base_object base; | ||
| 50 | struct vmw_dma_buffer dma; | ||
| 51 | }; | ||
| 52 | |||
| 53 | struct vmw_bo_user_rep { | ||
| 54 | uint32_t handle; | ||
| 55 | uint64_t map_handle; | ||
| 56 | }; | ||
| 57 | |||
| 58 | struct vmw_stream { | ||
| 59 | struct vmw_resource res; | ||
| 60 | uint32_t stream_id; | ||
| 61 | }; | ||
| 62 | |||
| 63 | struct vmw_user_stream { | ||
| 64 | struct ttm_base_object base; | ||
| 65 | struct vmw_stream stream; | ||
| 66 | }; | ||
| 67 | |||
| 68 | static inline struct vmw_dma_buffer * | ||
| 69 | vmw_dma_buffer(struct ttm_buffer_object *bo) | ||
| 70 | { | ||
| 71 | return container_of(bo, struct vmw_dma_buffer, base); | ||
| 72 | } | ||
| 73 | |||
| 74 | static inline struct vmw_user_dma_buffer * | ||
| 75 | vmw_user_dma_buffer(struct ttm_buffer_object *bo) | ||
| 76 | { | ||
| 77 | struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo); | ||
| 78 | return container_of(vmw_bo, struct vmw_user_dma_buffer, dma); | ||
| 79 | } | ||
| 80 | |||
| 81 | struct vmw_resource *vmw_resource_reference(struct vmw_resource *res) | ||
| 82 | { | ||
| 83 | kref_get(&res->kref); | ||
| 84 | return res; | ||
| 85 | } | ||
| 86 | |||
| 87 | static void vmw_resource_release(struct kref *kref) | ||
| 88 | { | ||
| 89 | struct vmw_resource *res = | ||
| 90 | container_of(kref, struct vmw_resource, kref); | ||
| 91 | struct vmw_private *dev_priv = res->dev_priv; | ||
| 92 | |||
| 93 | idr_remove(res->idr, res->id); | ||
| 94 | write_unlock(&dev_priv->resource_lock); | ||
| 95 | |||
| 96 | if (likely(res->hw_destroy != NULL)) | ||
| 97 | res->hw_destroy(res); | ||
| 98 | |||
| 99 | if (res->res_free != NULL) | ||
| 100 | res->res_free(res); | ||
| 101 | else | ||
| 102 | kfree(res); | ||
| 103 | |||
| 104 | write_lock(&dev_priv->resource_lock); | ||
| 105 | } | ||
| 106 | |||
| 107 | void vmw_resource_unreference(struct vmw_resource **p_res) | ||
| 108 | { | ||
| 109 | struct vmw_resource *res = *p_res; | ||
| 110 | struct vmw_private *dev_priv = res->dev_priv; | ||
| 111 | |||
| 112 | *p_res = NULL; | ||
| 113 | write_lock(&dev_priv->resource_lock); | ||
| 114 | kref_put(&res->kref, vmw_resource_release); | ||
| 115 | write_unlock(&dev_priv->resource_lock); | ||
| 116 | } | ||
| 117 | |||
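| | /* | ||
| | * Initialize a resource and allocate its id from @idr. The | ||
| | * idr_pre_get()/idr_get_new_above() pair is retried on -EAGAIN, as the | ||
| | * preallocated idr layer may have been consumed by a concurrent user. | ||
| | */ | ||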
| 118 | static int vmw_resource_init(struct vmw_private *dev_priv, | ||
| 119 | struct vmw_resource *res, | ||
| 120 | struct idr *idr, | ||
| 121 | enum ttm_object_type obj_type, | ||
| 122 | void (*res_free) (struct vmw_resource *res)) | ||
| 123 | { | ||
| 124 | int ret; | ||
| 125 | |||
| 126 | kref_init(&res->kref); | ||
| 127 | res->hw_destroy = NULL; | ||
| 128 | res->res_free = res_free; | ||
| 129 | res->res_type = obj_type; | ||
| 130 | res->idr = idr; | ||
| 131 | res->avail = false; | ||
| 132 | res->dev_priv = dev_priv; | ||
| 133 | |||
| 134 | do { | ||
| 135 | if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0)) | ||
| 136 | return -ENOMEM; | ||
| 137 | |||
| 138 | write_lock(&dev_priv->resource_lock); | ||
| 139 | ret = idr_get_new_above(idr, res, 1, &res->id); | ||
| 140 | write_unlock(&dev_priv->resource_lock); | ||
| 141 | |||
| 142 | } while (ret == -EAGAIN); | ||
| 143 | |||
| 144 | return ret; | ||
| 145 | } | ||
| 146 | |||
| 147 | /** | ||
| 148 | * vmw_resource_activate | ||
| 149 | * | ||
| 150 | * @res: Pointer to the newly created resource | ||
| 151 | * @hw_destroy: Destroy function. NULL if none. | ||
| 152 | * | ||
| 153 | * Activate a resource after the hardware has been made aware of it. | ||
| 154 | * Set the destroy function to @hw_destroy. Typically this frees the | ||
| 155 | * resource and destroys the hardware resources associated with it. | ||
| 156 | * Once a resource is activated, vmw_resource_lookup() will | ||
| 157 | * find it. | ||
| 158 | */ | ||
| 159 | |||
| 160 | static void vmw_resource_activate(struct vmw_resource *res, | ||
| 161 | void (*hw_destroy) (struct vmw_resource *)) | ||
| 162 | { | ||
| 163 | struct vmw_private *dev_priv = res->dev_priv; | ||
| 164 | |||
| 165 | write_lock(&dev_priv->resource_lock); | ||
| 166 | res->avail = true; | ||
| 167 | res->hw_destroy = hw_destroy; | ||
| 168 | write_unlock(&dev_priv->resource_lock); | ||
| 169 | } | ||
| 170 | |||
| 171 | struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv, | ||
| 172 | struct idr *idr, int id) | ||
| 173 | { | ||
| 174 | struct vmw_resource *res; | ||
| 175 | |||
| 176 | read_lock(&dev_priv->resource_lock); | ||
| 177 | res = idr_find(idr, id); | ||
| 178 | if (res && res->avail) | ||
| 179 | kref_get(&res->kref); | ||
| 180 | else | ||
| 181 | res = NULL; | ||
| 182 | read_unlock(&dev_priv->resource_lock); | ||
| 183 | |||
| 184 | if (unlikely(res == NULL)) | ||
| 185 | return NULL; | ||
| 186 | |||
| 187 | return res; | ||
| 188 | } | ||
| 189 | |||
| 190 | /** | ||
| 191 | * Context management: | ||
| 192 | */ | ||
| 193 | |||
| 194 | static void vmw_hw_context_destroy(struct vmw_resource *res) | ||
| 195 | { | ||
| 197 | struct vmw_private *dev_priv = res->dev_priv; | ||
| 198 | struct { | ||
| 199 | SVGA3dCmdHeader header; | ||
| 200 | SVGA3dCmdDestroyContext body; | ||
| 201 | } *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
| 202 | |||
| 203 | if (unlikely(cmd == NULL)) { | ||
| 204 | DRM_ERROR("Failed reserving FIFO space for context " | ||
| 205 | "destruction.\n"); | ||
| 206 | return; | ||
| 207 | } | ||
| 208 | |||
| 209 | cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY); | ||
| 210 | cmd->header.size = cpu_to_le32(sizeof(cmd->body)); | ||
| 211 | cmd->body.cid = cpu_to_le32(res->id); | ||
| 212 | |||
| 213 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
| 214 | } | ||
| 215 | |||
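| | /* | ||
| | * Initialize a context resource and make the device aware of it by | ||
| | * emitting an SVGA_3D_CMD_CONTEXT_DEFINE command through the FIFO. | ||
| | */ | ||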
| 216 | static int vmw_context_init(struct vmw_private *dev_priv, | ||
| 217 | struct vmw_resource *res, | ||
| 218 | void (*res_free) (struct vmw_resource *res)) | ||
| 219 | { | ||
| 220 | int ret; | ||
| 221 | |||
| 222 | struct { | ||
| 223 | SVGA3dCmdHeader header; | ||
| 224 | SVGA3dCmdDefineContext body; | ||
| 225 | } *cmd; | ||
| 226 | |||
| 227 | ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr, | ||
| 228 | VMW_RES_CONTEXT, res_free); | ||
| 229 | |||
| 230 | if (unlikely(ret != 0)) { | ||
| 231 | if (res_free == NULL) | ||
| 232 | kfree(res); | ||
| 233 | else | ||
| 234 | res_free(res); | ||
| 235 | return ret; | ||
| 236 | } | ||
| 237 | |||
| 238 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
| 239 | if (unlikely(cmd == NULL)) { | ||
| 240 | DRM_ERROR("Fifo reserve failed.\n"); | ||
| 241 | vmw_resource_unreference(&res); | ||
| 242 | return -ENOMEM; | ||
| 243 | } | ||
| 244 | |||
| 245 | cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE); | ||
| 246 | cmd->header.size = cpu_to_le32(sizeof(cmd->body)); | ||
| 247 | cmd->body.cid = cpu_to_le32(res->id); | ||
| 248 | |||
| 249 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
| 250 | vmw_resource_activate(res, vmw_hw_context_destroy); | ||
| 251 | return 0; | ||
| 252 | } | ||
| 253 | |||
| 254 | struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv) | ||
| 255 | { | ||
| 256 | struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL); | ||
| 257 | int ret; | ||
| 258 | |||
| 259 | if (unlikely(res == NULL)) | ||
| 260 | return NULL; | ||
| 261 | |||
| 262 | ret = vmw_context_init(dev_priv, res, NULL); | ||
| 263 | return (ret == 0) ? res : NULL; | ||
| 264 | } | ||
| 265 | |||
| 266 | /** | ||
| 267 | * User-space context management: | ||
| 268 | */ | ||
| 269 | |||
| 270 | static void vmw_user_context_free(struct vmw_resource *res) | ||
| 271 | { | ||
| 272 | struct vmw_user_context *ctx = | ||
| 273 | container_of(res, struct vmw_user_context, res); | ||
| 274 | |||
| 275 | kfree(ctx); | ||
| 276 | } | ||
| 277 | |||
| 278 | /** | ||
| 279 | * This function is called when user space has no more references on the | ||
| 280 | * base object. It releases the base-object's reference on the resource object. | ||
| 281 | */ | ||
| 282 | |||
| 283 | static void vmw_user_context_base_release(struct ttm_base_object **p_base) | ||
| 284 | { | ||
| 285 | struct ttm_base_object *base = *p_base; | ||
| 286 | struct vmw_user_context *ctx = | ||
| 287 | container_of(base, struct vmw_user_context, base); | ||
| 288 | struct vmw_resource *res = &ctx->res; | ||
| 289 | |||
| 290 | *p_base = NULL; | ||
| 291 | vmw_resource_unreference(&res); | ||
| 292 | } | ||
| 293 | |||
| 294 | int vmw_context_destroy_ioctl(struct drm_device *dev, void *data, | ||
| 295 | struct drm_file *file_priv) | ||
| 296 | { | ||
| 297 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
| 298 | struct vmw_resource *res; | ||
| 299 | struct vmw_user_context *ctx; | ||
| 300 | struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data; | ||
| 301 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | ||
| 302 | int ret = 0; | ||
| 303 | |||
| 304 | res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, arg->cid); | ||
| 305 | if (unlikely(res == NULL)) | ||
| 306 | return -EINVAL; | ||
| 307 | |||
| 308 | if (res->res_free != &vmw_user_context_free) { | ||
| 309 | ret = -EINVAL; | ||
| 310 | goto out; | ||
| 311 | } | ||
| 312 | |||
| 313 | ctx = container_of(res, struct vmw_user_context, res); | ||
| 314 | if (ctx->base.tfile != tfile && !ctx->base.shareable) { | ||
| 315 | ret = -EPERM; | ||
| 316 | goto out; | ||
| 317 | } | ||
| 318 | |||
| 319 | ttm_ref_object_base_unref(tfile, ctx->base.hash.key, TTM_REF_USAGE); | ||
| 320 | out: | ||
| 321 | vmw_resource_unreference(&res); | ||
| 322 | return ret; | ||
| 323 | } | ||
| 324 | |||
| 325 | int vmw_context_define_ioctl(struct drm_device *dev, void *data, | ||
| 326 | struct drm_file *file_priv) | ||
| 327 | { | ||
| 328 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
| 329 | struct vmw_user_context *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); | ||
| 330 | struct vmw_resource *res; | ||
| 331 | struct vmw_resource *tmp; | ||
| 332 | struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data; | ||
| 333 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | ||
| 334 | int ret; | ||
| 335 | |||
| 336 | if (unlikely(ctx == NULL)) | ||
| 337 | return -ENOMEM; | ||
| 338 | |||
| 339 | res = &ctx->res; | ||
| 340 | ctx->base.shareable = false; | ||
| 341 | ctx->base.tfile = NULL; | ||
| 342 | |||
| 343 | ret = vmw_context_init(dev_priv, res, vmw_user_context_free); | ||
| 344 | if (unlikely(ret != 0)) | ||
| 345 | return ret; | ||
| 346 | |||
| 347 | tmp = vmw_resource_reference(&ctx->res); | ||
| 348 | ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT, | ||
| 349 | &vmw_user_context_base_release, NULL); | ||
| 350 | |||
| 351 | if (unlikely(ret != 0)) { | ||
| 352 | vmw_resource_unreference(&tmp); | ||
| 353 | goto out_err; | ||
| 354 | } | ||
| 355 | |||
| 356 | arg->cid = res->id; | ||
| 357 | out_err: | ||
| 358 | vmw_resource_unreference(&res); | ||
| 359 | return ret; | ||
| 360 | |||
| 361 | } | ||
| 362 | |||
| 363 | int vmw_context_check(struct vmw_private *dev_priv, | ||
| 364 | struct ttm_object_file *tfile, | ||
| 365 | int id) | ||
| 366 | { | ||
| 367 | struct vmw_resource *res; | ||
| 368 | int ret = 0; | ||
| 369 | |||
| 370 | read_lock(&dev_priv->resource_lock); | ||
| 371 | res = idr_find(&dev_priv->context_idr, id); | ||
| 372 | if (res && res->avail) { | ||
| 373 | struct vmw_user_context *ctx = | ||
| 374 | container_of(res, struct vmw_user_context, res); | ||
| 375 | if (ctx->base.tfile != tfile && !ctx->base.shareable) | ||
| 376 | ret = -EPERM; | ||
| 377 | } else | ||
| 378 | ret = -EINVAL; | ||
| 379 | read_unlock(&dev_priv->resource_lock); | ||
| 380 | |||
| 381 | return ret; | ||
| 382 | } | ||
| 383 | |||
| 384 | |||
| 385 | /** | ||
| 386 | * Surface management. | ||
| 387 | */ | ||
| 388 | |||
| 389 | static void vmw_hw_surface_destroy(struct vmw_resource *res) | ||
| 390 | { | ||
| 392 | struct vmw_private *dev_priv = res->dev_priv; | ||
| 393 | struct { | ||
| 394 | SVGA3dCmdHeader header; | ||
| 395 | SVGA3dCmdDestroySurface body; | ||
| 396 | } *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
| 397 | |||
| 398 | if (unlikely(cmd == NULL)) { | ||
| 399 | DRM_ERROR("Failed reserving FIFO space for surface " | ||
| 400 | "destruction.\n"); | ||
| 401 | return; | ||
| 402 | } | ||
| 403 | |||
| 404 | cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DESTROY); | ||
| 405 | cmd->header.size = cpu_to_le32(sizeof(cmd->body)); | ||
| 406 | cmd->body.sid = cpu_to_le32(res->id); | ||
| 407 | |||
| 408 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
| 409 | } | ||
| 410 | |||
| 411 | void vmw_surface_res_free(struct vmw_resource *res) | ||
| 412 | { | ||
| 413 | struct vmw_surface *srf = container_of(res, struct vmw_surface, res); | ||
| 414 | |||
| 415 | kfree(srf->sizes); | ||
| 416 | kfree(srf->snooper.image); | ||
| 417 | kfree(srf); | ||
| 418 | } | ||
| 419 | |||
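| | /* | ||
| | * Define the surface in the device. The define command is variable | ||
| | * length: the header and body are followed by one SVGA3dSize per | ||
| | * mip level of each face. | ||
| | */ | ||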
| 420 | int vmw_surface_init(struct vmw_private *dev_priv, | ||
| 421 | struct vmw_surface *srf, | ||
| 422 | void (*res_free) (struct vmw_resource *res)) | ||
| 423 | { | ||
| 424 | int ret; | ||
| 425 | struct { | ||
| 426 | SVGA3dCmdHeader header; | ||
| 427 | SVGA3dCmdDefineSurface body; | ||
| 428 | } *cmd; | ||
| 429 | SVGA3dSize *cmd_size; | ||
| 430 | struct vmw_resource *res = &srf->res; | ||
| 431 | struct drm_vmw_size *src_size; | ||
| 432 | size_t submit_size; | ||
| 433 | uint32_t cmd_len; | ||
| 434 | int i; | ||
| 435 | |||
| 436 | BUG_ON(res_free == NULL); | ||
| 437 | ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr, | ||
| 438 | VMW_RES_SURFACE, res_free); | ||
| 439 | |||
| 440 | if (unlikely(ret != 0)) { | ||
| 441 | res_free(res); | ||
| 442 | return ret; | ||
| 443 | } | ||
| 444 | |||
| 445 | submit_size = sizeof(*cmd) + srf->num_sizes * sizeof(SVGA3dSize); | ||
| 446 | cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize); | ||
| 447 | |||
| 448 | cmd = vmw_fifo_reserve(dev_priv, submit_size); | ||
| 449 | if (unlikely(cmd == NULL)) { | ||
| 450 | DRM_ERROR("FIFO reserve failed for surface creation.\n"); | ||
| 451 | vmw_resource_unreference(&res); | ||
| 452 | return -ENOMEM; | ||
| 453 | } | ||
| 454 | |||
| 455 | cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DEFINE); | ||
| 456 | cmd->header.size = cpu_to_le32(cmd_len); | ||
| 457 | cmd->body.sid = cpu_to_le32(res->id); | ||
| 458 | cmd->body.surfaceFlags = cpu_to_le32(srf->flags); | ||
| 459 | cmd->body.format = cpu_to_le32(srf->format); | ||
| 460 | for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) { | ||
| 461 | cmd->body.face[i].numMipLevels = | ||
| 462 | cpu_to_le32(srf->mip_levels[i]); | ||
| 463 | } | ||
| 464 | |||
| 465 | cmd += 1; | ||
| 466 | cmd_size = (SVGA3dSize *) cmd; | ||
| 467 | src_size = srf->sizes; | ||
| 468 | |||
| 469 | for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) { | ||
| 470 | cmd_size->width = cpu_to_le32(src_size->width); | ||
| 471 | cmd_size->height = cpu_to_le32(src_size->height); | ||
| 472 | cmd_size->depth = cpu_to_le32(src_size->depth); | ||
| 473 | } | ||
| 474 | |||
| 475 | vmw_fifo_commit(dev_priv, submit_size); | ||
| 476 | vmw_resource_activate(res, vmw_hw_surface_destroy); | ||
| 477 | return 0; | ||
| 478 | } | ||
| 479 | |||
| 480 | static void vmw_user_surface_free(struct vmw_resource *res) | ||
| 481 | { | ||
| 482 | struct vmw_surface *srf = container_of(res, struct vmw_surface, res); | ||
| 483 | struct vmw_user_surface *user_srf = | ||
| 484 | container_of(srf, struct vmw_user_surface, srf); | ||
| 485 | |||
| 486 | kfree(srf->sizes); | ||
| 487 | kfree(srf->snooper.image); | ||
| 488 | kfree(user_srf); | ||
| 489 | } | ||
| 490 | |||
| 491 | int vmw_user_surface_lookup(struct vmw_private *dev_priv, | ||
| 492 | struct ttm_object_file *tfile, | ||
| 493 | int sid, struct vmw_surface **out) | ||
| 494 | { | ||
| 495 | struct vmw_resource *res; | ||
| 496 | struct vmw_surface *srf; | ||
| 497 | struct vmw_user_surface *user_srf; | ||
| 498 | |||
| 499 | res = vmw_resource_lookup(dev_priv, &dev_priv->surface_idr, sid); | ||
| 500 | if (unlikely(res == NULL)) | ||
| 501 | return -EINVAL; | ||
| 502 | |||
| 503 | if (res->res_free != &vmw_user_surface_free) { | ||
| 504 | vmw_resource_unreference(&res); | ||
| 505 | return -EINVAL; | ||
| 506 | } | ||
| 507 | srf = container_of(res, struct vmw_surface, res); | ||
| 508 | user_srf = container_of(srf, struct vmw_user_surface, srf); | ||
| 509 | if (user_srf->base.tfile != tfile && !user_srf->base.shareable) { | ||
| 510 | vmw_resource_unreference(&res); | ||
| 511 | return -EPERM; | ||
| 512 | } | ||
| 510 | |||
| 511 | *out = srf; | ||
| 512 | return 0; | ||
| 513 | } | ||
| 514 | |||
| 515 | static void vmw_user_surface_base_release(struct ttm_base_object **p_base) | ||
| 516 | { | ||
| 517 | struct ttm_base_object *base = *p_base; | ||
| 518 | struct vmw_user_surface *user_srf = | ||
| 519 | container_of(base, struct vmw_user_surface, base); | ||
| 520 | struct vmw_resource *res = &user_srf->srf.res; | ||
| 521 | |||
| 522 | *p_base = NULL; | ||
| 523 | vmw_resource_unreference(&res); | ||
| 524 | } | ||
| 525 | |||
| 526 | int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data, | ||
| 527 | struct drm_file *file_priv) | ||
| 528 | { | ||
| 529 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
| 530 | struct vmw_resource *res; | ||
| 531 | struct vmw_surface *srf; | ||
| 532 | struct vmw_user_surface *user_srf; | ||
| 533 | struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data; | ||
| 534 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | ||
| 535 | int ret = 0; | ||
| 536 | |||
| 537 | res = vmw_resource_lookup(dev_priv, &dev_priv->surface_idr, arg->sid); | ||
| 538 | if (unlikely(res == NULL)) | ||
| 539 | return -EINVAL; | ||
| 540 | |||
| 541 | if (res->res_free != &vmw_user_surface_free) { | ||
| 542 | ret = -EINVAL; | ||
| 543 | goto out; | ||
| 544 | } | ||
| 545 | |||
| 546 | srf = container_of(res, struct vmw_surface, res); | ||
| 547 | user_srf = container_of(srf, struct vmw_user_surface, srf); | ||
| 548 | if (user_srf->base.tfile != tfile && !user_srf->base.shareable) { | ||
| 549 | ret = -EPERM; | ||
| 550 | goto out; | ||
| 551 | } | ||
| 552 | |||
| 553 | ttm_ref_object_base_unref(tfile, user_srf->base.hash.key, | ||
| 554 | TTM_REF_USAGE); | ||
| 555 | out: | ||
| 556 | vmw_resource_unreference(&res); | ||
| 557 | return ret; | ||
| 558 | } | ||
| 559 | |||
| 560 | int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | ||
| 561 | struct drm_file *file_priv) | ||
| 562 | { | ||
| 563 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
| 564 | struct vmw_user_surface *user_srf = | ||
| 565 | kmalloc(sizeof(*user_srf), GFP_KERNEL); | ||
| 566 | struct vmw_surface *srf; | ||
| 567 | struct vmw_resource *res; | ||
| 568 | struct vmw_resource *tmp; | ||
| 569 | union drm_vmw_surface_create_arg *arg = | ||
| 570 | (union drm_vmw_surface_create_arg *)data; | ||
| 571 | struct drm_vmw_surface_create_req *req = &arg->req; | ||
| 572 | struct drm_vmw_surface_arg *rep = &arg->rep; | ||
| 573 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | ||
| 574 | struct drm_vmw_size __user *user_sizes; | ||
| 575 | int ret; | ||
| 576 | int i; | ||
| 577 | |||
| 578 | if (unlikely(user_srf == NULL)) | ||
| 579 | return -ENOMEM; | ||
| 580 | |||
| 581 | srf = &user_srf->srf; | ||
| 582 | res = &srf->res; | ||
| 583 | |||
| 584 | srf->flags = req->flags; | ||
| 585 | srf->format = req->format; | ||
| 586 | memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels)); | ||
| 587 | srf->num_sizes = 0; | ||
| 588 | for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) | ||
| 589 | srf->num_sizes += srf->mip_levels[i]; | ||
| 590 | |||
| 591 | if (srf->num_sizes > DRM_VMW_MAX_SURFACE_FACES * | ||
| 592 | DRM_VMW_MAX_MIP_LEVELS) { | ||
| 593 | ret = -EINVAL; | ||
| 594 | goto out_err0; | ||
| 595 | } | ||
| 596 | |||
| 597 | srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL); | ||
| 598 | if (unlikely(srf->sizes == NULL)) { | ||
| 599 | ret = -ENOMEM; | ||
| 600 | goto out_err0; | ||
| 601 | } | ||
| 602 | |||
| 603 | user_sizes = (struct drm_vmw_size __user *)(unsigned long) | ||
| 604 | req->size_addr; | ||
| 605 | |||
| 606 | if (copy_from_user(srf->sizes, user_sizes, | ||
| 607 | srf->num_sizes * sizeof(*srf->sizes)) != 0) { | ||
| 608 | ret = -EFAULT; | ||
| 609 | goto out_err1; | ||
| 610 | } | ||
| 611 | user_srf->base.shareable = false; | ||
| 612 | user_srf->base.tfile = NULL; | ||
| 613 | |||
| 614 | /** | ||
| 615 | * From this point, the generic resource management functions | ||
| 616 | * destroy the object on failure. | ||
| 617 | */ | ||
| 618 | |||
| 619 | ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free); | ||
| 620 | if (unlikely(ret != 0)) | ||
| 621 | return ret; | ||
| 622 | |||
| 623 | tmp = vmw_resource_reference(&srf->res); | ||
| 624 | ret = ttm_base_object_init(tfile, &user_srf->base, | ||
| 625 | req->shareable, VMW_RES_SURFACE, | ||
| 626 | &vmw_user_surface_base_release, NULL); | ||
| 627 | |||
| 628 | if (unlikely(ret != 0)) { | ||
| 629 | vmw_resource_unreference(&tmp); | ||
| 630 | vmw_resource_unreference(&res); | ||
| 631 | return ret; | ||
| 632 | } | ||
| 633 | |||
| 634 | if (srf->flags & (1 << 9) && | ||
| 635 | srf->num_sizes == 1 && | ||
| 636 | srf->sizes[0].width == 64 && | ||
| 637 | srf->sizes[0].height == 64 && | ||
| 638 | srf->format == SVGA3D_A8R8G8B8) { | ||
| 639 | |||
| 640 | srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL); | ||
| 641 | /* clear the image */ | ||
| 642 | if (srf->snooper.image) | ||
| 643 | memset(srf->snooper.image, 0x00, 64 * 64 * 4); | ||
| 644 | else | ||
| 645 | DRM_ERROR("Failed to allocate cursor_image\n"); | ||
| 646 | |||
| 647 | } else { | ||
| 648 | srf->snooper.image = NULL; | ||
| 649 | } | ||
| 650 | srf->snooper.crtc = NULL; | ||
| 651 | |||
| 652 | rep->sid = res->id; | ||
| 653 | vmw_resource_unreference(&res); | ||
| 654 | return 0; | ||
| 655 | out_err1: | ||
| 656 | kfree(srf->sizes); | ||
| 657 | out_err0: | ||
| 658 | kfree(user_srf); | ||
| 659 | return ret; | ||
| 660 | } | ||
| 661 | |||
| 662 | int vmw_surface_reference_ioctl(struct drm_device *dev, void *data, | ||
| 663 | struct drm_file *file_priv) | ||
| 664 | { | ||
| 665 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
| 666 | union drm_vmw_surface_reference_arg *arg = | ||
| 667 | (union drm_vmw_surface_reference_arg *)data; | ||
| 668 | struct drm_vmw_surface_arg *req = &arg->req; | ||
| 669 | struct drm_vmw_surface_create_req *rep = &arg->rep; | ||
| 670 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | ||
| 671 | struct vmw_resource *res; | ||
| 672 | struct vmw_surface *srf; | ||
| 673 | struct vmw_user_surface *user_srf; | ||
| 674 | struct drm_vmw_size __user *user_sizes; | ||
| 675 | int ret; | ||
| 676 | |||
| 677 | res = vmw_resource_lookup(dev_priv, &dev_priv->surface_idr, req->sid); | ||
| 678 | if (unlikely(res == NULL)) | ||
| 679 | return -EINVAL; | ||
| 680 | |||
| 681 | if (res->res_free != &vmw_user_surface_free) { | ||
| 682 | ret = -EINVAL; | ||
| 683 | goto out; | ||
| 684 | } | ||
| 685 | |||
| 686 | srf = container_of(res, struct vmw_surface, res); | ||
| 687 | user_srf = container_of(srf, struct vmw_user_surface, srf); | ||
| 688 | if (user_srf->base.tfile != tfile && !user_srf->base.shareable) { | ||
| 689 | DRM_ERROR("Tried to reference a non-shareable surface\n"); | ||
| 690 | ret = -EPERM; | ||
| 691 | goto out; | ||
| 692 | } | ||
| 693 | |||
| 694 | ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL); | ||
| 695 | if (unlikely(ret != 0)) { | ||
| 696 | DRM_ERROR("Could not add a reference to a surface.\n"); | ||
| 697 | goto out; | ||
| 698 | } | ||
| 699 | |||
| 700 | rep->flags = srf->flags; | ||
| 701 | rep->format = srf->format; | ||
| 702 | memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels)); | ||
| 703 | user_sizes = (struct drm_vmw_size __user *)(unsigned long) | ||
| 704 | rep->size_addr; | ||
| 705 | |||
| 706 | if (user_sizes) | ||
| 707 | ret = copy_to_user(user_sizes, srf->sizes, | ||
| 708 | srf->num_sizes * sizeof(*srf->sizes)); | ||
| 709 | if (unlikely(ret != 0)) { | ||
| 710 | DRM_ERROR("copy_to_user failed %p %u\n", | ||
| 711 | user_sizes, srf->num_sizes); | ||
| 712 | ret = -EFAULT; | ||
| 713 | /* FIXME: Unreference surface here? */ | ||
| 714 | goto out; | ||
| 715 | } | ||
| 716 | |||
| 717 | out: | ||
| 718 | vmw_resource_unreference(&res); | ||
| 719 | return ret; | ||
| 720 | } | ||
| 721 | |||
| 722 | int vmw_surface_check(struct vmw_private *dev_priv, | ||
| 723 | struct ttm_object_file *tfile, | ||
| 724 | int id) | ||
| 725 | { | ||
| 726 | struct vmw_resource *res; | ||
| 727 | int ret = 0; | ||
| 728 | |||
| 729 | read_lock(&dev_priv->resource_lock); | ||
| 730 | res = idr_find(&dev_priv->surface_idr, id); | ||
| 731 | if (res && res->avail) { | ||
| 732 | struct vmw_surface *srf = | ||
| 733 | container_of(res, struct vmw_surface, res); | ||
| 734 | struct vmw_user_surface *usrf = | ||
| 735 | container_of(srf, struct vmw_user_surface, srf); | ||
| 736 | |||
| 737 | if (usrf->base.tfile != tfile && !usrf->base.shareable) | ||
| 738 | ret = -EPERM; | ||
| 739 | } else | ||
| 740 | ret = -EINVAL; | ||
| 741 | read_unlock(&dev_priv->resource_lock); | ||
| 742 | |||
| 743 | return ret; | ||
| 744 | } | ||
| 745 | |||
| 746 | /** | ||
| 747 | * Buffer management. | ||
| 748 | */ | ||
| 749 | |||
| 750 | static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob, | ||
| 751 | unsigned long num_pages) | ||
| 752 | { | ||
| 753 | static size_t bo_user_size = ~0; | ||
| 754 | |||
| 755 | size_t page_array_size = | ||
| 756 | (num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK; | ||
| 757 | |||
| 758 | if (unlikely(bo_user_size == ~0)) { | ||
| 759 | bo_user_size = glob->ttm_bo_extra_size + | ||
| 760 | ttm_round_pot(sizeof(struct vmw_dma_buffer)); | ||
| 761 | } | ||
| 762 | |||
| 763 | return bo_user_size + page_array_size; | ||
| 764 | } | ||
| 765 | |||
| 766 | void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo) | ||
| 767 | { | ||
| 768 | struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo); | ||
| 769 | struct ttm_bo_global *glob = bo->glob; | ||
| 770 | struct vmw_private *dev_priv = | ||
| 771 | container_of(bo->bdev, struct vmw_private, bdev); | ||
| 772 | |||
| 773 | ttm_mem_global_free(glob->mem_glob, bo->acc_size); | ||
| 774 | if (vmw_bo->gmr_bound) { | ||
| 775 | vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id); | ||
| 776 | spin_lock(&glob->lru_lock); | ||
| 777 | ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id); | ||
| 778 | spin_unlock(&glob->lru_lock); | ||
| 779 | } | ||
| 780 | kfree(vmw_bo); | ||
| 781 | } | ||
| 782 | |||
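| | /* | ||
| | * Initialize a vmw_dma_buffer: account its size against the TTM | ||
| | * memory global, then hand the embedded buffer object to | ||
| | * ttm_bo_init(), which owns cleanup via @bo_free from then on. | ||
| | */ | ||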
| 783 | int vmw_dmabuf_init(struct vmw_private *dev_priv, | ||
| 784 | struct vmw_dma_buffer *vmw_bo, | ||
| 785 | size_t size, struct ttm_placement *placement, | ||
| 786 | bool interruptible, | ||
| 787 | void (*bo_free) (struct ttm_buffer_object *bo)) | ||
| 788 | { | ||
| 789 | struct ttm_bo_device *bdev = &dev_priv->bdev; | ||
| 790 | struct ttm_mem_global *mem_glob = bdev->glob->mem_glob; | ||
| 791 | size_t acc_size; | ||
| 792 | int ret; | ||
| 793 | |||
| 794 | BUG_ON(!bo_free); | ||
| 795 | |||
| 796 | acc_size = | ||
| 797 | vmw_dmabuf_acc_size(bdev->glob, | ||
| 798 | (size + PAGE_SIZE - 1) >> PAGE_SHIFT); | ||
| 799 | |||
| 800 | ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false); | ||
| 801 | if (unlikely(ret != 0)) { | ||
| 802 | /* We must free the bo here ourselves, mirroring what | ||
| 803 | * ttm_bo_init() would have done on failure. */ | ||
| 804 | bo_free(&vmw_bo->base); | ||
| 805 | return ret; | ||
| 806 | } | ||
| 807 | |||
| 808 | memset(vmw_bo, 0, sizeof(*vmw_bo)); | ||
| 809 | |||
| 810 | INIT_LIST_HEAD(&vmw_bo->gmr_lru); | ||
| 811 | INIT_LIST_HEAD(&vmw_bo->validate_list); | ||
| 812 | vmw_bo->gmr_id = 0; | ||
| 813 | vmw_bo->gmr_bound = false; | ||
| 814 | |||
| 815 | ret = ttm_bo_init(bdev, &vmw_bo->base, size, | ||
| 816 | ttm_bo_type_device, placement, | ||
| 817 | 0, 0, interruptible, | ||
| 818 | NULL, acc_size, bo_free); | ||
| 819 | return ret; | ||
| 820 | } | ||
| 821 | |||
| 822 | static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo) | ||
| 823 | { | ||
| 824 | struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo); | ||
| 825 | struct vmw_dma_buffer *vmw_bo = &vmw_user_bo->dma; | ||
| 826 | struct ttm_bo_global *glob = bo->glob; | ||
| 827 | struct vmw_private *dev_priv = | ||
| 828 | container_of(bo->bdev, struct vmw_private, bdev); | ||
| 829 | |||
| 830 | ttm_mem_global_free(glob->mem_glob, bo->acc_size); | ||
| 831 | if (vmw_bo->gmr_bound) { | ||
| 832 | vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id); | ||
| 833 | spin_lock(&glob->lru_lock); | ||
| 834 | ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id); | ||
| 835 | spin_unlock(&glob->lru_lock); | ||
| 836 | } | ||
| 837 | kfree(vmw_user_bo); | ||
| 838 | } | ||
| 839 | |||
| 840 | static void vmw_user_dmabuf_release(struct ttm_base_object **p_base) | ||
| 841 | { | ||
| 842 | struct vmw_user_dma_buffer *vmw_user_bo; | ||
| 843 | struct ttm_base_object *base = *p_base; | ||
| 844 | struct ttm_buffer_object *bo; | ||
| 845 | |||
| 846 | *p_base = NULL; | ||
| 847 | |||
| 848 | if (unlikely(base == NULL)) | ||
| 849 | return; | ||
| 850 | |||
| 851 | vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base); | ||
| 852 | bo = &vmw_user_bo->dma.base; | ||
| 853 | ttm_bo_unref(&bo); | ||
| 854 | } | ||
| 855 | |||
| 856 | int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, | ||
| 857 | struct drm_file *file_priv) | ||
| 858 | { | ||
| 859 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
| 860 | union drm_vmw_alloc_dmabuf_arg *arg = | ||
| 861 | (union drm_vmw_alloc_dmabuf_arg *)data; | ||
| 862 | struct drm_vmw_alloc_dmabuf_req *req = &arg->req; | ||
| 863 | struct drm_vmw_dmabuf_rep *rep = &arg->rep; | ||
| 864 | struct vmw_user_dma_buffer *vmw_user_bo; | ||
| 865 | struct ttm_buffer_object *tmp; | ||
| 866 | struct vmw_master *vmaster = vmw_master(file_priv->master); | ||
| 867 | int ret; | ||
| 868 | |||
| 869 | vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL); | ||
| 870 | if (unlikely(vmw_user_bo == NULL)) | ||
| 871 | return -ENOMEM; | ||
| 872 | |||
| 873 | ret = ttm_read_lock(&vmaster->lock, true); | ||
| 874 | if (unlikely(ret != 0)) { | ||
| 875 | kfree(vmw_user_bo); | ||
| 876 | return ret; | ||
| 877 | } | ||
| 878 | |||
| 879 | ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size, | ||
| 880 | &vmw_vram_placement, true, | ||
| 881 | &vmw_user_dmabuf_destroy); | ||
| 882 | if (unlikely(ret != 0)) | ||
| 883 | goto out_unlock; | ||
| 884 | |||
| 885 | tmp = ttm_bo_reference(&vmw_user_bo->dma.base); | ||
| 886 | ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile, | ||
| 887 | &vmw_user_bo->base, | ||
| 888 | false, | ||
| 889 | ttm_buffer_type, | ||
| 890 | &vmw_user_dmabuf_release, NULL); | ||
| 891 | if (unlikely(ret != 0)) { | ||
| 892 | ttm_bo_unref(&tmp); | ||
| 893 | goto out_unlock; | ||
| 894 | } | ||
| 895 | rep->handle = vmw_user_bo->base.hash.key; | ||
| 896 | rep->map_handle = vmw_user_bo->dma.base.addr_space_offset; | ||
| 897 | rep->cur_gmr_id = vmw_user_bo->base.hash.key; | ||
| 898 | rep->cur_gmr_offset = 0; | ||
| 899 | ttm_bo_unref(&tmp); | ||
| 900 | out_unlock: | ||
| 901 | ttm_read_unlock(&vmaster->lock); | ||
| 902 | |||
| 903 | return ret; | ||
| 904 | } | ||
| 905 | |||
| 906 | int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data, | ||
| 907 | struct drm_file *file_priv) | ||
| 908 | { | ||
| 909 | struct drm_vmw_unref_dmabuf_arg *arg = | ||
| 910 | (struct drm_vmw_unref_dmabuf_arg *)data; | ||
| 911 | |||
| 912 | return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile, | ||
| 913 | arg->handle, | ||
| 914 | TTM_REF_USAGE); | ||
| 915 | } | ||
| 916 | |||
| 917 | uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo, | ||
| 918 | uint32_t cur_validate_node) | ||
| 919 | { | ||
| 920 | struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo); | ||
| 921 | |||
| 922 | if (likely(vmw_bo->on_validate_list)) | ||
| 923 | return vmw_bo->cur_validate_node; | ||
| 924 | |||
| 925 | vmw_bo->cur_validate_node = cur_validate_node; | ||
| 926 | vmw_bo->on_validate_list = true; | ||
| 927 | |||
| 928 | return cur_validate_node; | ||
| 929 | } | ||
| 930 | |||
| 931 | void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo) | ||
| 932 | { | ||
| 933 | struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo); | ||
| 934 | |||
| 935 | vmw_bo->on_validate_list = false; | ||
| 936 | } | ||
| 937 | |||
| 938 | uint32_t vmw_dmabuf_gmr(struct ttm_buffer_object *bo) | ||
| 939 | { | ||
| 940 | struct vmw_dma_buffer *vmw_bo; | ||
| 941 | |||
| 942 | if (bo->mem.mem_type == TTM_PL_VRAM) | ||
| 943 | return SVGA_GMR_FRAMEBUFFER; | ||
| 944 | |||
| 945 | vmw_bo = vmw_dma_buffer(bo); | ||
| 946 | |||
| 947 | return (vmw_bo->gmr_bound) ? vmw_bo->gmr_id : SVGA_GMR_NULL; | ||
| 948 | } | ||
| 949 | |||
| 950 | void vmw_dmabuf_set_gmr(struct ttm_buffer_object *bo, uint32_t id) | ||
| 951 | { | ||
| 952 | struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo); | ||
| 953 | vmw_bo->gmr_bound = true; | ||
| 954 | vmw_bo->gmr_id = id; | ||
| 955 | } | ||
| 956 | |||
| 957 | int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile, | ||
| 958 | uint32_t handle, struct vmw_dma_buffer **out) | ||
| 959 | { | ||
| 960 | struct vmw_user_dma_buffer *vmw_user_bo; | ||
| 961 | struct ttm_base_object *base; | ||
| 962 | |||
| 963 | base = ttm_base_object_lookup(tfile, handle); | ||
| 964 | if (unlikely(base == NULL)) { | ||
| 965 | printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n", | ||
| 966 | (unsigned long)handle); | ||
| 967 | return -ESRCH; | ||
| 968 | } | ||
| 969 | |||
| 970 | if (unlikely(base->object_type != ttm_buffer_type)) { | ||
| 971 | ttm_base_object_unref(&base); | ||
| 972 | printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n", | ||
| 973 | (unsigned long)handle); | ||
| 974 | return -EINVAL; | ||
| 975 | } | ||
| 976 | |||
| 977 | vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base); | ||
| 978 | (void)ttm_bo_reference(&vmw_user_bo->dma.base); | ||
| 979 | ttm_base_object_unref(&base); | ||
| 980 | *out = &vmw_user_bo->dma; | ||
| 981 | |||
| 982 | return 0; | ||
| 983 | } | ||
| 984 | |||
| 985 | /** | ||
| 986 | * TODO: Implement a gmr id eviction mechanism. Currently we just fail | ||
| 987 | * when we're out of ids, causing GMR space to be allocated | ||
| 988 | * out of VRAM. | ||
| 989 | */ | ||
| 990 | |||
| 991 | int vmw_gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id) | ||
| 992 | { | ||
| 993 | struct ttm_bo_global *glob = dev_priv->bdev.glob; | ||
| 994 | int id; | ||
| 995 | int ret; | ||
| 996 | |||
| 997 | do { | ||
| 998 | if (unlikely(ida_pre_get(&dev_priv->gmr_ida, GFP_KERNEL) == 0)) | ||
| 999 | return -ENOMEM; | ||
| 1000 | |||
| 1001 | spin_lock(&glob->lru_lock); | ||
| 1002 | ret = ida_get_new(&dev_priv->gmr_ida, &id); | ||
| 1003 | spin_unlock(&glob->lru_lock); | ||
| 1004 | } while (ret == -EAGAIN); | ||
| 1005 | |||
| 1006 | if (unlikely(ret != 0)) | ||
| 1007 | return ret; | ||
| 1008 | |||
| 1009 | if (unlikely(id >= dev_priv->max_gmr_ids)) { | ||
| 1010 | spin_lock(&glob->lru_lock); | ||
| 1011 | ida_remove(&dev_priv->gmr_ida, id); | ||
| 1012 | spin_unlock(&glob->lru_lock); | ||
| 1013 | return -EBUSY; | ||
| 1014 | } | ||
| 1015 | |||
| 1016 | *p_id = (uint32_t) id; | ||
| 1017 | return 0; | ||
| 1018 | } | ||
| 1019 | |||
| 1020 | /* | ||
| 1021 | * Stream management | ||
| 1022 | */ | ||
| 1023 | |||
| 1024 | static void vmw_stream_destroy(struct vmw_resource *res) | ||
| 1025 | { | ||
| 1026 | struct vmw_private *dev_priv = res->dev_priv; | ||
| 1027 | struct vmw_stream *stream; | ||
| 1028 | int ret; | ||
| 1029 | |||
| 1030 | DRM_INFO("%s: unref\n", __func__); | ||
| 1031 | stream = container_of(res, struct vmw_stream, res); | ||
| 1032 | |||
| 1033 | ret = vmw_overlay_unref(dev_priv, stream->stream_id); | ||
| 1034 | WARN_ON(ret != 0); | ||
| 1035 | } | ||
| 1036 | |||
| 1037 | static int vmw_stream_init(struct vmw_private *dev_priv, | ||
| 1038 | struct vmw_stream *stream, | ||
| 1039 | void (*res_free) (struct vmw_resource *res)) | ||
| 1040 | { | ||
| 1041 | struct vmw_resource *res = &stream->res; | ||
| 1042 | int ret; | ||
| 1043 | |||
| 1044 | ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr, | ||
| 1045 | VMW_RES_STREAM, res_free); | ||
| 1046 | |||
| 1047 | if (unlikely(ret != 0)) { | ||
| 1048 | if (res_free == NULL) | ||
| 1049 | kfree(stream); | ||
| 1050 | else | ||
| 1051 | res_free(&stream->res); | ||
| 1052 | return ret; | ||
| 1053 | } | ||
| 1054 | |||
| 1055 | ret = vmw_overlay_claim(dev_priv, &stream->stream_id); | ||
| 1056 | if (ret) { | ||
| 1057 | vmw_resource_unreference(&res); | ||
| 1058 | return ret; | ||
| 1059 | } | ||
| 1060 | |||
| 1061 | DRM_INFO("%s: claimed\n", __func__); | ||
| 1062 | |||
| 1063 | vmw_resource_activate(&stream->res, vmw_stream_destroy); | ||
| 1064 | return 0; | ||
| 1065 | } | ||
| 1066 | |||
| 1067 | /** | ||
| 1068 | * User-space stream management: | ||
| 1069 | */ | ||
| 1070 | |||
| 1071 | static void vmw_user_stream_free(struct vmw_resource *res) | ||
| 1072 | { | ||
| 1073 | struct vmw_user_stream *stream = | ||
| 1074 | container_of(res, struct vmw_user_stream, stream.res); | ||
| 1075 | |||
| 1076 | kfree(stream); | ||
| 1077 | } | ||
| 1078 | |||
| 1079 | /** | ||
| 1080 | * This function is called when user space has no more references on the | ||
| 1081 | * base object. It releases the base-object's reference on the resource object. | ||
| 1082 | */ | ||
| 1083 | |||
| 1084 | static void vmw_user_stream_base_release(struct ttm_base_object **p_base) | ||
| 1085 | { | ||
| 1086 | struct ttm_base_object *base = *p_base; | ||
| 1087 | struct vmw_user_stream *stream = | ||
| 1088 | container_of(base, struct vmw_user_stream, base); | ||
| 1089 | struct vmw_resource *res = &stream->stream.res; | ||
| 1090 | |||
| 1091 | *p_base = NULL; | ||
| 1092 | vmw_resource_unreference(&res); | ||
| 1093 | } | ||
| 1094 | |||
| 1095 | int vmw_stream_unref_ioctl(struct drm_device *dev, void *data, | ||
| 1096 | struct drm_file *file_priv) | ||
| 1097 | { | ||
| 1098 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
| 1099 | struct vmw_resource *res; | ||
| 1100 | struct vmw_user_stream *stream; | ||
| 1101 | struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data; | ||
| 1102 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | ||
| 1103 | int ret = 0; | ||
| 1104 | |||
| 1105 | res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, arg->stream_id); | ||
| 1106 | if (unlikely(res == NULL)) | ||
| 1107 | return -EINVAL; | ||
| 1108 | |||
| 1109 | if (res->res_free != &vmw_user_stream_free) { | ||
| 1110 | ret = -EINVAL; | ||
| 1111 | goto out; | ||
| 1112 | } | ||
| 1113 | |||
| 1114 | stream = container_of(res, struct vmw_user_stream, stream.res); | ||
| 1115 | if (stream->base.tfile != tfile) { | ||
| 1116 | ret = -EINVAL; | ||
| 1117 | goto out; | ||
| 1118 | } | ||
| 1119 | |||
| 1120 | ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE); | ||
| 1121 | out: | ||
| 1122 | vmw_resource_unreference(&res); | ||
| 1123 | return ret; | ||
| 1124 | } | ||
| 1125 | |||
| 1126 | int vmw_stream_claim_ioctl(struct drm_device *dev, void *data, | ||
| 1127 | struct drm_file *file_priv) | ||
| 1128 | { | ||
| 1129 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
| 1130 | struct vmw_user_stream *stream = kmalloc(sizeof(*stream), GFP_KERNEL); | ||
| 1131 | struct vmw_resource *res; | ||
| 1132 | struct vmw_resource *tmp; | ||
| 1133 | struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data; | ||
| 1134 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | ||
| 1135 | int ret; | ||
| 1136 | |||
| 1137 | if (unlikely(stream == NULL)) | ||
| 1138 | return -ENOMEM; | ||
| 1139 | |||
| 1140 | res = &stream->stream.res; | ||
| 1141 | stream->base.shareable = false; | ||
| 1142 | stream->base.tfile = NULL; | ||
| 1143 | |||
| 1144 | ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free); | ||
| 1145 | if (unlikely(ret != 0)) | ||
| 1146 | return ret; | ||
| 1147 | |||
| 1148 | tmp = vmw_resource_reference(res); | ||
| 1149 | ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM, | ||
| 1150 | &vmw_user_stream_base_release, NULL); | ||
| 1151 | |||
| 1152 | if (unlikely(ret != 0)) { | ||
| 1153 | vmw_resource_unreference(&tmp); | ||
| 1154 | goto out_err; | ||
| 1155 | } | ||
| 1156 | |||
| 1157 | arg->stream_id = res->id; | ||
| 1158 | out_err: | ||
| 1159 | vmw_resource_unreference(&res); | ||
| 1160 | return ret; | ||
| 1161 | } | ||
| 1162 | |||
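| | /* | ||
| | * Look up a user stream by its resource id. On success, *inout_id is | ||
| | * replaced with the underlying hardware stream id and a reference to | ||
| | * the resource is returned in @out; the caller must unreference it. | ||
| | */ | ||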
| 1163 | int vmw_user_stream_lookup(struct vmw_private *dev_priv, | ||
| 1164 | struct ttm_object_file *tfile, | ||
| 1165 | uint32_t *inout_id, struct vmw_resource **out) | ||
| 1166 | { | ||
| 1167 | struct vmw_user_stream *stream; | ||
| 1168 | struct vmw_resource *res; | ||
| 1169 | int ret; | ||
| 1170 | |||
| 1171 | res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, *inout_id); | ||
| 1172 | if (unlikely(res == NULL)) | ||
| 1173 | return -EINVAL; | ||
| 1174 | |||
| 1175 | if (res->res_free != &vmw_user_stream_free) { | ||
| 1176 | ret = -EINVAL; | ||
| 1177 | goto err_ref; | ||
| 1178 | } | ||
| 1179 | |||
| 1180 | stream = container_of(res, struct vmw_user_stream, stream.res); | ||
| 1181 | if (stream->base.tfile != tfile) { | ||
| 1182 | ret = -EPERM; | ||
| 1183 | goto err_ref; | ||
| 1184 | } | ||
| 1185 | |||
| 1186 | *inout_id = stream->stream.stream_id; | ||
| 1187 | *out = res; | ||
| 1188 | return 0; | ||
| 1189 | err_ref: | ||
| 1190 | vmw_resource_unreference(&res); | ||
| 1191 | return ret; | ||
| 1192 | } | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c new file mode 100644 index 000000000000..e3df4adfb4d8 --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c | |||
| @@ -0,0 +1,99 @@ | |||
| 1 | /************************************************************************** | ||
| 2 | * | ||
| 3 | * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA | ||
| 4 | * All Rights Reserved. | ||
| 5 | * | ||
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 7 | * copy of this software and associated documentation files (the | ||
| 8 | * "Software"), to deal in the Software without restriction, including | ||
| 9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
| 10 | * distribute, sub license, and/or sell copies of the Software, and to | ||
| 11 | * permit persons to whom the Software is furnished to do so, subject to | ||
| 12 | * the following conditions: | ||
| 13 | * | ||
| 14 | * The above copyright notice and this permission notice (including the | ||
| 15 | * next paragraph) shall be included in all copies or substantial portions | ||
| 16 | * of the Software. | ||
| 17 | * | ||
| 18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
| 21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
| 22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
| 23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
| 24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
| 25 | * | ||
| 26 | **************************************************************************/ | ||
| 27 | |||
| 28 | #include "drmP.h" | ||
| 29 | #include "vmwgfx_drv.h" | ||
| 30 | |||
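| | /* | ||
| | * Device mmap entry point: offsets below VMWGFX_FILE_PAGE_OFFSET are | ||
| | * routed to the FIFO mmap handler or legacy drm_mmap(); everything | ||
| | * else is treated as a TTM buffer-object mapping. | ||
| | */ | ||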
| 31 | int vmw_mmap(struct file *filp, struct vm_area_struct *vma) | ||
| 32 | { | ||
| 33 | struct drm_file *file_priv; | ||
| 34 | struct vmw_private *dev_priv; | ||
| 35 | |||
| 36 | if (unlikely(vma->vm_pgoff < VMWGFX_FILE_PAGE_OFFSET)) { | ||
| 37 | if (vmw_fifo_mmap(filp, vma) == 0) | ||
| 38 | return 0; | ||
| 39 | return drm_mmap(filp, vma); | ||
| 40 | } | ||
| 41 | |||
| 42 | file_priv = (struct drm_file *)filp->private_data; | ||
| 43 | dev_priv = vmw_priv(file_priv->minor->dev); | ||
| 44 | return ttm_bo_mmap(filp, vma, &dev_priv->bdev); | ||
| 45 | } | ||
| 46 | |||
| 47 | static int vmw_ttm_mem_global_init(struct ttm_global_reference *ref) | ||
| 48 | { | ||
| 49 | DRM_INFO("global init.\n"); | ||
| 50 | return ttm_mem_global_init(ref->object); | ||
| 51 | } | ||
| 52 | |||
| 53 | static void vmw_ttm_mem_global_release(struct ttm_global_reference *ref) | ||
| 54 | { | ||
| 55 | ttm_mem_global_release(ref->object); | ||
| 56 | } | ||
| 57 | |||
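| | /* | ||
| | * Take references on the two TTM globals: the memory-accounting | ||
| | * global first, then the buffer-object global that depends on it. | ||
| | * vmw_ttm_global_release() drops them in reverse order. | ||
| | */ | ||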
| 58 | int vmw_ttm_global_init(struct vmw_private *dev_priv) | ||
| 59 | { | ||
| 60 | struct ttm_global_reference *global_ref; | ||
| 61 | int ret; | ||
| 62 | |||
| 63 | global_ref = &dev_priv->mem_global_ref; | ||
| 64 | global_ref->global_type = TTM_GLOBAL_TTM_MEM; | ||
| 65 | global_ref->size = sizeof(struct ttm_mem_global); | ||
| 66 | global_ref->init = &vmw_ttm_mem_global_init; | ||
| 67 | global_ref->release = &vmw_ttm_mem_global_release; | ||
| 68 | |||
| 69 | ret = ttm_global_item_ref(global_ref); | ||
| 70 | if (unlikely(ret != 0)) { | ||
| 71 | DRM_ERROR("Failed setting up TTM memory accounting.\n"); | ||
| 72 | return ret; | ||
| 73 | } | ||
| 74 | |||
| 75 | dev_priv->bo_global_ref.mem_glob = | ||
| 76 | dev_priv->mem_global_ref.object; | ||
| 77 | global_ref = &dev_priv->bo_global_ref.ref; | ||
| 78 | global_ref->global_type = TTM_GLOBAL_TTM_BO; | ||
| 79 | global_ref->size = sizeof(struct ttm_bo_global); | ||
| 80 | global_ref->init = &ttm_bo_global_init; | ||
| 81 | global_ref->release = &ttm_bo_global_release; | ||
| 82 | ret = ttm_global_item_ref(global_ref); | ||
| 83 | |||
| 84 | if (unlikely(ret != 0)) { | ||
| 85 | DRM_ERROR("Failed setting up TTM buffer objects.\n"); | ||
| 86 | goto out_no_bo; | ||
| 87 | } | ||
| 88 | |||
| 89 | return 0; | ||
| 90 | out_no_bo: | ||
| 91 | ttm_global_item_unref(&dev_priv->mem_global_ref); | ||
| 92 | return ret; | ||
| 93 | } | ||
| 94 | |||
| 95 | void vmw_ttm_global_release(struct vmw_private *dev_priv) | ||
| 96 | { | ||
| 97 | ttm_global_item_unref(&dev_priv->bo_global_ref.ref); | ||
| 98 | ttm_global_item_unref(&dev_priv->mem_global_ref); | ||
| 99 | } | ||
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig index d21b3469f6d7..89f725fe064d 100644 --- a/drivers/staging/Kconfig +++ b/drivers/staging/Kconfig | |||
| @@ -101,6 +101,8 @@ source "drivers/staging/p9auth/Kconfig" | |||
| 101 | 101 | ||
| 102 | source "drivers/staging/line6/Kconfig" | 102 | source "drivers/staging/line6/Kconfig" |
| 103 | 103 | ||
| 104 | source "drivers/gpu/drm/vmwgfx/Kconfig" | ||
| 105 | |||
| 104 | source "drivers/gpu/drm/radeon/Kconfig" | 106 | source "drivers/gpu/drm/radeon/Kconfig" |
| 105 | 107 | ||
| 106 | source "drivers/staging/octeon/Kconfig" | 108 | source "drivers/staging/octeon/Kconfig" |
diff --git a/include/drm/Kbuild b/include/drm/Kbuild index b940fdfa3b25..1e83f85be819 100644 --- a/include/drm/Kbuild +++ b/include/drm/Kbuild | |||
| @@ -7,4 +7,5 @@ unifdef-y += r128_drm.h | |||
| 7 | unifdef-y += radeon_drm.h | 7 | unifdef-y += radeon_drm.h |
| 8 | unifdef-y += sis_drm.h | 8 | unifdef-y += sis_drm.h |
| 9 | unifdef-y += savage_drm.h | 9 | unifdef-y += savage_drm.h |
| 10 | unifdef-y += vmwgfx_drm.h | ||
| 10 | unifdef-y += via_drm.h | 11 | unifdef-y += via_drm.h |
diff --git a/include/drm/vmwgfx_drm.h b/include/drm/vmwgfx_drm.h new file mode 100644 index 000000000000..2be7e1249b6f --- /dev/null +++ b/include/drm/vmwgfx_drm.h | |||
| @@ -0,0 +1,574 @@ | |||
| 1 | /************************************************************************** | ||
| 2 | * | ||
| 3 | * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA | ||
| 4 | * All Rights Reserved. | ||
| 5 | * | ||
| 6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 7 | * copy of this software and associated documentation files (the | ||
| 8 | * "Software"), to deal in the Software without restriction, including | ||
| 9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
| 10 | * distribute, sub license, and/or sell copies of the Software, and to | ||
| 11 | * permit persons to whom the Software is furnished to do so, subject to | ||
| 12 | * the following conditions: | ||
| 13 | * | ||
| 14 | * The above copyright notice and this permission notice (including the | ||
| 15 | * next paragraph) shall be included in all copies or substantial portions | ||
| 16 | * of the Software. | ||
| 17 | * | ||
| 18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
| 21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
| 22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
| 23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
| 24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
| 25 | * | ||
| 26 | **************************************************************************/ | ||
| 27 | |||
| 28 | #ifndef __VMWGFX_DRM_H__ | ||
| 29 | #define __VMWGFX_DRM_H__ | ||
| 30 | |||
| 31 | #define DRM_VMW_MAX_SURFACE_FACES 6 | ||
| 32 | #define DRM_VMW_MAX_MIP_LEVELS 24 | ||
| 33 | |||
| 34 | #define DRM_VMW_EXT_NAME_LEN 128 | ||
| 35 | |||
| 36 | #define DRM_VMW_GET_PARAM 0 | ||
| 37 | #define DRM_VMW_ALLOC_DMABUF 1 | ||
| 38 | #define DRM_VMW_UNREF_DMABUF 2 | ||
| 39 | #define DRM_VMW_CURSOR_BYPASS 3 | ||
| 40 | /* guarded by DRM_VMW_PARAM_NUM_STREAMS != 0 */ | ||
| 41 | #define DRM_VMW_CONTROL_STREAM 4 | ||
| 42 | #define DRM_VMW_CLAIM_STREAM 5 | ||
| 43 | #define DRM_VMW_UNREF_STREAM 6 | ||
| 44 | /* guarded by DRM_VMW_PARAM_3D == 1 */ | ||
| 45 | #define DRM_VMW_CREATE_CONTEXT 7 | ||
| 46 | #define DRM_VMW_UNREF_CONTEXT 8 | ||
| 47 | #define DRM_VMW_CREATE_SURFACE 9 | ||
| 48 | #define DRM_VMW_UNREF_SURFACE 10 | ||
| 49 | #define DRM_VMW_REF_SURFACE 11 | ||
| 50 | #define DRM_VMW_EXECBUF 12 | ||
| 51 | #define DRM_VMW_FIFO_DEBUG 13 | ||
| 52 | #define DRM_VMW_FENCE_WAIT 14 | ||
| 53 | |||
| 54 | |||
| 55 | /*************************************************************************/ | ||
| 56 | /** | ||
| 57 | * DRM_VMW_GET_PARAM - get device information. | ||
| 58 | * | ||
| 59 | * DRM_VMW_PARAM_FIFO_OFFSET: | ||
| 60 | * Offset to use to map the first page of the FIFO read-only. | ||
| 61 | * The fifo is mapped using the mmap() system call on the drm device. | ||
| 62 | * | ||
| 63 | * DRM_VMW_PARAM_OVERLAY_IOCTL: | ||
| 64 | * Whether the driver supports the overlay ioctl. | ||
| 65 | */ | ||
| 66 | |||
| 67 | #define DRM_VMW_PARAM_NUM_STREAMS 0 | ||
| 68 | #define DRM_VMW_PARAM_NUM_FREE_STREAMS 1 | ||
| 69 | #define DRM_VMW_PARAM_3D 2 | ||
| 70 | #define DRM_VMW_PARAM_FIFO_OFFSET 3 | ||
| 71 | |||
| 72 | |||
| 73 | /** | ||
| 74 | * struct drm_vmw_getparam_arg | ||
| 75 | * | ||
| 76 | * @value: Returned value. //Out | ||
| 77 | * @param: Parameter to query. //In | ||
| 78 | * | ||
| 79 | * Argument to the DRM_VMW_GET_PARAM Ioctl. | ||
| 80 | */ | ||
| 81 | |||
| 82 | struct drm_vmw_getparam_arg { | ||
| 83 | uint64_t value; | ||
| 84 | uint32_t param; | ||
| 85 | uint32_t pad64; | ||
| 86 | }; | ||
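
For reference, here is how this struct is typically consumed from user space; a sketch with the same includes and libdrm assumptions as the earlier FIFO-mapping example (the helper name is hypothetical):

/* Query whether the device advertises 3D support. */
static int vmw_has_3d(int fd)
{
	struct drm_vmw_getparam_arg arg = { .param = DRM_VMW_PARAM_3D };

	if (drmCommandWriteRead(fd, DRM_VMW_GET_PARAM, &arg, sizeof(arg)))
		return 0;
	return arg.value != 0;
}
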
| 87 | |||
| 88 | /*************************************************************************/ | ||
| 89 | /** | ||
| 90 | * DRM_VMW_EXTENSION - Query device extensions. | ||
| 91 | */ | ||
| 92 | |||
| 93 | /** | ||
| 94 | * struct drm_vmw_extension_rep | ||
| 95 | * | ||
| 96 | * @exists: The queried extension exists. | ||
| 97 | * @driver_ioctl_offset: Ioctl number of the first ioctl in the extension. | ||
| 98 | * @driver_sarea_offset: Offset to any space in the DRI SAREA | ||
| 99 | * used by the extension. | ||
| 100 | * @major: Major version number of the extension. | ||
| 101 | * @minor: Minor version number of the extension. | ||
| 102 | * @pl: Patch level version number of the extension. | ||
| 103 | * | ||
| 104 | * Output argument to the DRM_VMW_EXTENSION Ioctl. | ||
| 105 | */ | ||
| 106 | |||
| 107 | struct drm_vmw_extension_rep { | ||
| 108 | int32_t exists; | ||
| 109 | uint32_t driver_ioctl_offset; | ||
| 110 | uint32_t driver_sarea_offset; | ||
| 111 | uint32_t major; | ||
| 112 | uint32_t minor; | ||
| 113 | uint32_t pl; | ||
| 114 | uint32_t pad64; | ||
| 115 | }; | ||
| 116 | |||
| 117 | /** | ||
| 118 | * union drm_vmw_extension_arg | ||
| 119 | * | ||
| 120 | * @extension - ASCII name of the extension to be queried. //In | ||
| 121 | * @rep - Reply as defined above. //Out | ||
| 122 | * | ||
| 123 | * Argument to the DRM_VMW_EXTENSION Ioctl. | ||
| 124 | */ | ||
| 125 | |||
| 126 | union drm_vmw_extension_arg { | ||
| 127 | char extension[DRM_VMW_EXT_NAME_LEN]; | ||
| 128 | struct drm_vmw_extension_rep rep; | ||
| 129 | }; | ||
| 130 | |||
| 131 | /*************************************************************************/ | ||
| 132 | /** | ||
| 133 | * DRM_VMW_CREATE_CONTEXT - Create a host context. | ||
| 134 | * | ||
| 135 | * Allocates a device unique context id, and queues a create context command | ||
| 136 | * for the host. Does not wait for host completion. | ||
| 137 | */ | ||
| 138 | |||
| 139 | /** | ||
| 140 | * struct drm_vmw_context_arg | ||
| 141 | * | ||
| 142 | * @cid: Device unique context ID. | ||
| 143 | * | ||
| 144 | * Output argument to the DRM_VMW_CREATE_CONTEXT Ioctl. | ||
| 145 | * Input argument to the DRM_VMW_UNREF_CONTEXT Ioctl. | ||
| 146 | */ | ||
| 147 | |||
| 148 | struct drm_vmw_context_arg { | ||
| 149 | int32_t cid; | ||
| 150 | uint32_t pad64; | ||
| 151 | }; | ||
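
A usage sketch of the context pair (same libdrm assumptions as the earlier examples; illustrative only, not part of the patch):

/* Create a host context and release it again. On success, arg.cid is
 * valid for use in the command stream between the two calls. */
static int vmw_context_roundtrip(int fd)
{
	struct drm_vmw_context_arg arg = { 0 };
	int ret;

	ret = drmCommandWriteRead(fd, DRM_VMW_CREATE_CONTEXT, &arg, sizeof(arg));
	if (ret)
		return ret;
	return drmCommandWrite(fd, DRM_VMW_UNREF_CONTEXT, &arg, sizeof(arg));
}
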
| 152 | |||
| 153 | /*************************************************************************/ | ||
| 154 | /** | ||
| 155 | * DRM_VMW_UNREF_CONTEXT - Free a host context. | ||
| 156 | * | ||
| 157 | * Frees a global context id, and queues a destroy host command for the host. | ||
| 158 | * Does not wait for host completion. The context ID can be used directly | ||
| 159 | * in the command stream and shows up as the same context ID on the host. | ||
| 160 | */ | ||
| 161 | |||
| 162 | /*************************************************************************/ | ||
| 163 | /** | ||
| 164 | * DRM_VMW_CREATE_SURFACE - Create a host surface. | ||
| 165 | * | ||
| 166 | * Allocates a device unique surface id, and queues a create surface command | ||
| 167 | * for the host. Does not wait for host completion. The surface ID can be | ||
| 168 | * used directly in the command stream and shows up as the same surface | ||
| 169 | * ID on the host. | ||
| 170 | */ | ||
| 171 | |||
| 172 | /** | ||
| 173 | * struct drm_vmw_surface_create_req | ||
| 174 | * | ||
| 175 | * @flags: Surface flags as understood by the host. | ||
| 176 | * @format: Surface format as understood by the host. | ||
| 177 | * @mip_levels: Number of mip levels for each face. | ||
| 178 | * An unused face should have 0 encoded. | ||
| 179 | * @size_addr: Address of a user-space array of struct drm_vmw_size | ||
| 180 | * cast to a uint64_t for 32-64 bit compatibility. | ||
| 181 | * The size of the array should equal the total number of mipmap levels. | ||
| 182 | * @shareable: Boolean indicating whether other clients (as identified by | ||
| 183 | * file descriptors) may reference this surface. | ||
| 184 | * | ||
| 185 | * Input data to the DRM_VMW_CREATE_SURFACE Ioctl. | ||
| 186 | * Output data from the DRM_VMW_REF_SURFACE Ioctl. | ||
| 187 | */ | ||
| 188 | |||
| 189 | struct drm_vmw_surface_create_req { | ||
| 190 | uint32_t flags; | ||
| 191 | uint32_t format; | ||
| 192 | uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES]; | ||
| 193 | uint64_t size_addr; | ||
| 194 | int32_t shareable; | ||
| 195 | uint32_t pad64; | ||
| 196 | }; | ||
| 197 | |||
| 198 | /** | ||
| 199 | * struct drm_vmw_surface_arg | ||
| 200 | * | ||
| 201 | * @sid: Surface id of created surface or surface to destroy or reference. | ||
| 202 | * | ||
| 203 | * Output data from the DRM_VMW_CREATE_SURFACE Ioctl. | ||
| 204 | * Input argument to the DRM_VMW_UNREF_SURFACE Ioctl. | ||
| 205 | * Input argument to the DRM_VMW_REF_SURFACE Ioctl. | ||
| 206 | */ | ||
| 207 | |||
| 208 | struct drm_vmw_surface_arg { | ||
| 209 | int32_t sid; | ||
| 210 | uint32_t pad64; | ||
| 211 | }; | ||
| 212 | |||
| 213 | /** | ||
| 214 | * struct drm_vmw_size | ||
| 215 | * | ||
| 216 | * @width - mip level width | ||
| 217 | * @height - mip level height | ||
| 218 | * @depth - mip level depth | ||
| 219 | * | ||
| 220 | * Description of a mip level. | ||
| 221 | * Input data to the DRM_VMW_CREATE_SURFACE Ioctl. | ||
| 222 | */ | ||
| 223 | |||
| 224 | struct drm_vmw_size { | ||
| 225 | uint32_t width; | ||
| 226 | uint32_t height; | ||
| 227 | uint32_t depth; | ||
| 228 | uint32_t pad64; | ||
| 229 | }; | ||
| 230 | |||
| 231 | /** | ||
| 232 | * union drm_vmw_surface_create_arg | ||
| 233 | * | ||
| 234 | * @rep: Output data as described above. | ||
| 235 | * @req: Input data as described above. | ||
| 236 | * | ||
| 237 | * Argument to the DRM_VMW_CREATE_SURFACE Ioctl. | ||
| 238 | */ | ||
| 239 | |||
| 240 | union drm_vmw_surface_create_arg { | ||
| 241 | struct drm_vmw_surface_arg rep; | ||
| 242 | struct drm_vmw_surface_create_req req; | ||
| 243 | }; | ||
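
To make the mip_levels/size_addr contract concrete, a sketch creating a single-face, one-mip surface (format values are host-defined; the helper is illustrative, not part of the patch):

static int vmw_create_simple_surface(int fd, uint32_t format,
				     uint32_t width, uint32_t height,
				     int32_t *out_sid)
{
	struct drm_vmw_size size = {
		.width = width, .height = height, .depth = 1,
	};
	union drm_vmw_surface_create_arg arg = {
		.req = {
			.format = format,
			.mip_levels = { 1 },	/* face 0: one mip level */
			.size_addr = (uint64_t)(unsigned long)&size,
		},
	};
	int ret;

	ret = drmCommandWriteRead(fd, DRM_VMW_CREATE_SURFACE, &arg, sizeof(arg));
	if (ret == 0)
		*out_sid = arg.rep.sid;
	return ret;
}
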
| 244 | |||
| 245 | /*************************************************************************/ | ||
| 246 | /** | ||
| 247 | * DRM_VMW_REF_SURFACE - Reference a host surface. | ||
| 248 | * | ||
| 249 | * Puts a reference on a host surface with a given sid, as previously | ||
| 250 | * returned by the DRM_VMW_CREATE_SURFACE ioctl. | ||
| 251 | * A reference will make sure the surface isn't destroyed while we hold | ||
| 252 | * it and will allow the calling client to use the surface ID in the command | ||
| 253 | * stream. | ||
| 254 | * | ||
| 255 | * On successful return, the Ioctl returns the surface information given | ||
| 256 | * in the DRM_VMW_CREATE_SURFACE ioctl. | ||
| 257 | */ | ||
| 258 | |||
| 259 | /** | ||
| 260 | * union drm_vmw_surface_reference_arg | ||
| 261 | * | ||
| 262 | * @rep: Output data as described above. | ||
| 263 | * @req: Input data as described above. | ||
| 264 | * | ||
| 265 | * Argument to the DRM_VMW_REF_SURFACE Ioctl. | ||
| 266 | */ | ||
| 267 | |||
| 268 | union drm_vmw_surface_reference_arg { | ||
| 269 | struct drm_vmw_surface_create_req rep; | ||
| 270 | struct drm_vmw_surface_arg req; | ||
| 271 | }; | ||
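
Correspondingly, a sketch of taking a reference and reading back the creation parameters (illustrative only, same libdrm assumptions):

static int vmw_ref_surface(int fd, int32_t sid,
			   struct drm_vmw_surface_create_req *info)
{
	union drm_vmw_surface_reference_arg arg = { .req = { .sid = sid } };
	int ret;

	ret = drmCommandWriteRead(fd, DRM_VMW_REF_SURFACE, &arg, sizeof(arg));
	if (ret == 0)
		*info = arg.rep;	/* creation parameters of the surface */
	return ret;
}
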
| 272 | |||
| 273 | /*************************************************************************/ | ||
| 274 | /** | ||
| 275 | * DRM_VMW_UNREF_SURFACE - Unreference a host surface. | ||
| 276 | * | ||
| 277 | * Clear a reference previously put on a host surface. | ||
| 278 | * When all references are gone, including the one implicitly placed | ||
| 279 | * on creation, | ||
| 280 | * a destroy surface command will be queued for the host. | ||
| 281 | * Does not wait for completion. | ||
| 282 | */ | ||
| 283 | |||
| 284 | /*************************************************************************/ | ||
| 285 | /** | ||
| 286 | * DRM_VMW_EXECBUF | ||
| 287 | * | ||
| 288 | * Submit a command buffer for execution on the host, and return a | ||
| 289 | * fence sequence that when signaled, indicates that the command buffer has | ||
| 290 | * executed. | ||
| 291 | */ | ||
| 292 | |||
| 293 | /** | ||
| 294 | * struct drm_vmw_execbuf_arg | ||
| 295 | * | ||
| 296 | * @commands: User-space address of a command buffer cast to a uint64_t. | ||
| 297 | * @command_size: Size in bytes of the command buffer. | ||
| 298 | * @fence_rep: User-space address of a struct drm_vmw_fence_rep cast to a | ||
| 299 | * uint64_t. | ||
| 300 | * | ||
| 301 | * Argument to the DRM_VMW_EXECBUF Ioctl. | ||
| 302 | */ | ||
| 303 | |||
| 304 | struct drm_vmw_execbuf_arg { | ||
| 305 | uint64_t commands; | ||
| 306 | uint32_t command_size; | ||
| 307 | uint32_t pad64; | ||
| 308 | uint64_t fence_rep; | ||
| 309 | }; | ||
| 310 | |||
| 311 | /** | ||
| 312 | * struct drm_vmw_fence_rep | ||
| 313 | * | ||
| 314 | * @fence_seq: Fence sequence associated with a command submission. | ||
| 315 | * @error: This member should be set to -EFAULT by user-space on submission. | ||
| 316 | * The following actions should be taken on completion: | ||
| 317 | * error == -EFAULT: Fence communication failed. The host is synchronized. | ||
| 318 | * Use the last fence id read from the FIFO fence register. | ||
| 319 | * error != 0 && error != -EFAULT: | ||
| 320 | * Fence submission failed. The host is synchronized. Use the fence_seq member. | ||
| 321 | * error == 0: All is OK, The host may not be synchronized. | ||
| 322 | * Use the fence_seq member. | ||
| 323 | * | ||
| 324 | * Input / Output data to the DRM_VMW_EXECBUF Ioctl. | ||
| 325 | */ | ||
| 326 | |||
| 327 | struct drm_vmw_fence_rep { | ||
| 328 | uint64_t fence_seq; | ||
| 329 | int32_t error; | ||
| 330 | uint32_t pad64; | ||
| 331 | }; | ||
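
The @error protocol above translates to roughly the following submission helper; read_fifo_fence() is a hypothetical routine reading the FIFO fence register mapped earlier, and the rest follows the documented rules:

extern uint64_t read_fifo_fence(void);	/* hypothetical */

/* Prime fence_rep.error with -EFAULT, submit, then pick the fence
 * sequence according to the rules documented above. */
static int vmw_submit(int fd, void *cmds, uint32_t size, uint64_t *seq)
{
	struct drm_vmw_fence_rep rep = { .error = -EFAULT };
	struct drm_vmw_execbuf_arg arg = {
		.commands = (uint64_t)(unsigned long)cmds,
		.command_size = size,
		.fence_rep = (uint64_t)(unsigned long)&rep,
	};
	int ret;

	ret = drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
	if (ret)
		return ret;

	*seq = (rep.error == -EFAULT) ? read_fifo_fence() : rep.fence_seq;
	return 0;
}
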
| 332 | |||
| 333 | /*************************************************************************/ | ||
| 334 | /** | ||
| 335 | * DRM_VMW_ALLOC_DMABUF | ||
| 336 | * | ||
| 337 | * Allocate a DMA buffer that is visible also to the host. | ||
| 338 | * NOTE: The buffer is | ||
| 339 | * identified by a handle and an offset, which are private to the guest, but | ||
| 340 | * usable in the command stream. The guest kernel may translate these | ||
| 341 | * and patch up the command stream accordingly. In the future, the offset may | ||
| 342 | * be zero at all times, or it may disappear from the interface before it is | ||
| 343 | * fixed. | ||
| 344 | * | ||
| 345 | * The DMA buffer may stay user-space mapped in the guest at all times, | ||
| 346 | * and is thus suitable for sub-allocation. | ||
| 347 | * | ||
| 348 | * DMA buffers are mapped using the mmap() syscall on the drm device. | ||
| 349 | */ | ||
| 350 | |||
| 351 | /** | ||
| 352 | * struct drm_vmw_alloc_dmabuf_req | ||
| 353 | * | ||
| 354 | * @size: Required minimum size of the buffer. | ||
| 355 | * | ||
| 356 | * Input data to the DRM_VMW_ALLOC_DMABUF Ioctl. | ||
| 357 | */ | ||
| 358 | |||
| 359 | struct drm_vmw_alloc_dmabuf_req { | ||
| 360 | uint32_t size; | ||
| 361 | uint32_t pad64; | ||
| 362 | }; | ||
| 363 | |||
| 364 | /** | ||
| 365 | * struct drm_vmw_dmabuf_rep | ||
| 366 | * | ||
| 367 | * @map_handle: Offset to use in the mmap() call used to map the buffer. | ||
| 368 | * @handle: Handle unique to this buffer. Used for unreferencing. | ||
| 369 | * @cur_gmr_id: GMR id to use in the command stream when this buffer is | ||
| 370 | * referenced. See note above. | ||
| 371 | * @cur_gmr_offset: Offset to use in the command stream when this buffer is | ||
| 372 | * referenced. See note above. | ||
| 373 | * | ||
| 374 | * Output data from the DRM_VMW_ALLOC_DMABUF Ioctl. | ||
| 375 | */ | ||
| 376 | |||
| 377 | struct drm_vmw_dmabuf_rep { | ||
| 378 | uint64_t map_handle; | ||
| 379 | uint32_t handle; | ||
| 380 | uint32_t cur_gmr_id; | ||
| 381 | uint32_t cur_gmr_offset; | ||
| 382 | uint32_t pad64; | ||
| 383 | }; | ||
| 384 | |||
| 385 | /** | ||
| 386 | * union drm_vmw_dmabuf_arg | ||
| 387 | * | ||
| 388 | * @req: Input data as described above. | ||
| 389 | * @rep: Output data as described above. | ||
| 390 | * | ||
| 391 | * Argument to the DRM_VMW_ALLOC_DMABUF Ioctl. | ||
| 392 | */ | ||
| 393 | |||
| 394 | union drm_vmw_alloc_dmabuf_arg { | ||
| 395 | struct drm_vmw_alloc_dmabuf_req req; | ||
| 396 | struct drm_vmw_dmabuf_rep rep; | ||
| 397 | }; | ||
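
A sketch of the allocate-and-map flow described above, using map_handle as the mmap() offset (helper hypothetical, not part of the patch):

static void *vmw_alloc_and_map(int fd, uint32_t size, uint32_t *handle)
{
	union drm_vmw_alloc_dmabuf_arg arg = { .req = { .size = size } };
	void *ptr;

	if (drmCommandWriteRead(fd, DRM_VMW_ALLOC_DMABUF, &arg, sizeof(arg)))
		return NULL;

	*handle = arg.rep.handle;
	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   fd, (off_t)arg.rep.map_handle);
	return ptr == MAP_FAILED ? NULL : ptr;
}
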
| 398 | |||
| 399 | /*************************************************************************/ | ||
| 400 | /** | ||
| 401 | * DRM_VMW_UNREF_DMABUF - Free a DMA buffer. | ||
| 402 | * | ||
| 403 | */ | ||
| 404 | |||
| 405 | /** | ||
| 406 | * struct drm_vmw_unref_dmabuf_arg | ||
| 407 | * | ||
| 408 | * @handle: Handle indicating what buffer to free. Obtained from the | ||
| 409 | * DRM_VMW_ALLOC_DMABUF Ioctl. | ||
| 410 | * | ||
| 411 | * Argument to the DRM_VMW_UNREF_DMABUF Ioctl. | ||
| 412 | */ | ||
| 413 | |||
| 414 | struct drm_vmw_unref_dmabuf_arg { | ||
| 415 | uint32_t handle; | ||
| 416 | uint32_t pad64; | ||
| 417 | }; | ||
| 418 | |||
| 419 | /*************************************************************************/ | ||
| 420 | /** | ||
| 421 | * DRM_VMW_FIFO_DEBUG - Get last FIFO submission. | ||
| 422 | * | ||
| 423 | * This IOCTL copies the last FIFO submission directly out of the FIFO buffer. | ||
| 424 | */ | ||
| 425 | |||
| 426 | /** | ||
| 427 | * struct drm_vmw_fifo_debug_arg | ||
| 428 | * | ||
| 429 | * @debug_buffer: User space address of a debug_buffer cast to an uint64_t //In | ||
| 430 | * @debug_buffer_size: Size in bytes of debug buffer //In | ||
| 431 | * @used_size: Number of bytes copied to the buffer // Out | ||
| 432 | * @did_not_fit: Boolean indicating that the fifo contents did not fit. //Out | ||
| 433 | * | ||
| 434 | * Argument to the DRM_VMW_FIFO_DEBUG Ioctl. | ||
| 435 | */ | ||
| 436 | |||
| 437 | struct drm_vmw_fifo_debug_arg { | ||
| 438 | uint64_t debug_buffer; | ||
| 439 | uint32_t debug_buffer_size; | ||
| 440 | uint32_t used_size; | ||
| 441 | int32_t did_not_fit; | ||
| 442 | uint32_t pad64; | ||
| 443 | }; | ||
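
Usage is straightforward; a sketch (illustrative only):

static int vmw_fifo_dump(int fd, void *buf, uint32_t buf_size,
			 uint32_t *used, int32_t *truncated)
{
	struct drm_vmw_fifo_debug_arg arg = {
		.debug_buffer = (uint64_t)(unsigned long)buf,
		.debug_buffer_size = buf_size,
	};
	int ret;

	ret = drmCommandWriteRead(fd, DRM_VMW_FIFO_DEBUG, &arg, sizeof(arg));
	if (ret == 0) {
		*used = arg.used_size;
		*truncated = arg.did_not_fit;
	}
	return ret;
}
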
| 444 | |||
| 445 | struct drm_vmw_fence_wait_arg { | ||
| 446 | uint64_t sequence; | ||
| 447 | uint64_t kernel_cookie; | ||
| 448 | int32_t cookie_valid; | ||
| 449 | int32_t pad64; | ||
| 450 | }; | ||
| 451 | |||
| 452 | /*************************************************************************/ | ||
| 453 | /** | ||
| 454 | * DRM_VMW_CONTROL_STREAM - Control overlays, aka streams. | ||
| 455 | * | ||
| 456 | * This IOCTL controls the overlay units of the svga device. | ||
| 457 | * The SVGA overlay units do not work like regular hardware units in | ||
| 458 | * that they do not automatically read back the contents of the given dma | ||
| 459 | * buffer. Instead, they read back only on each call to this ioctl, and | ||
| 460 | * at any point between this call being made and a following call that | ||
| 461 | * either changes the buffer or disables the stream. | ||
| 462 | */ | ||
| 463 | |||
| 464 | /** | ||
| 465 | * struct drm_vmw_rect | ||
| 466 | * | ||
| 467 | * Defines a rectangle. Used in the overlay ioctl to define | ||
| 468 | * source and destination rectangles. | ||
| 469 | */ | ||
| 470 | |||
| 471 | struct drm_vmw_rect { | ||
| 472 | int32_t x; | ||
| 473 | int32_t y; | ||
| 474 | uint32_t w; | ||
| 475 | uint32_t h; | ||
| 476 | }; | ||
| 477 | |||
| 478 | /** | ||
| 479 | * struct drm_vmw_control_stream_arg | ||
| 480 | * | ||
| 481 | * @stream_id: Stream to control | ||
| 482 | * @enabled: If false, all following arguments are ignored. | ||
| 483 | * @handle: Handle to buffer for getting data from. | ||
| 484 | * @format: Format of the overlay as understood by the host. | ||
| 485 | * @width: Width of the overlay. | ||
| 486 | * @height: Height of the overlay. | ||
| 487 | * @size: Size of the overlay in bytes. | ||
| 488 | * @pitch: Array of pitches, the last two are only used for YUV12 formats. | ||
| 489 | * @offset: Offset from start of dma buffer to overlay. | ||
| 490 | * @src: Source rect, must be within the defined area above. | ||
| 491 | * @dst: Destination rect, x and y may be negative. | ||
| 492 | * | ||
| 493 | * Argument to the DRM_VMW_CONTROL_STREAM Ioctl. | ||
| 494 | */ | ||
| 495 | |||
| 496 | struct drm_vmw_control_stream_arg { | ||
| 497 | uint32_t stream_id; | ||
| 498 | uint32_t enabled; | ||
| 499 | |||
| 500 | uint32_t flags; | ||
| 501 | uint32_t color_key; | ||
| 502 | |||
| 503 | uint32_t handle; | ||
| 504 | uint32_t offset; | ||
| 505 | int32_t format; | ||
| 506 | uint32_t size; | ||
| 507 | uint32_t width; | ||
| 508 | uint32_t height; | ||
| 509 | uint32_t pitch[3]; | ||
| 510 | |||
| 511 | uint32_t pad64; | ||
| 512 | struct drm_vmw_rect src; | ||
| 513 | struct drm_vmw_rect dst; | ||
| 514 | }; | ||
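
Pulling the fields together, a sketch that points a claimed stream at a dma buffer and displays it 1:1 at the origin (format and flags values are host-defined; the helper is hypothetical):

static int vmw_show_overlay(int fd, uint32_t stream_id, uint32_t handle,
			    int32_t format, uint32_t w, uint32_t h,
			    uint32_t size, uint32_t pitch0)
{
	struct drm_vmw_control_stream_arg arg = {
		.stream_id = stream_id,
		.enabled = 1,
		.handle = handle,
		.format = format,
		.size = size,
		.width = w,
		.height = h,
		.pitch = { pitch0, 0, 0 },
		.src = { .x = 0, .y = 0, .w = w, .h = h },
		.dst = { .x = 0, .y = 0, .w = w, .h = h },
	};

	return drmCommandWrite(fd, DRM_VMW_CONTROL_STREAM, &arg, sizeof(arg));
}
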
| 515 | |||
| 516 | /*************************************************************************/ | ||
| 517 | /** | ||
| 518 | * DRM_VMW_CURSOR_BYPASS - Give extra information about cursor bypass. | ||
| 519 | * | ||
| 520 | */ | ||
| 521 | |||
| 522 | #define DRM_VMW_CURSOR_BYPASS_ALL (1 << 0) | ||
| 523 | #define DRM_VMW_CURSOR_BYPASS_FLAGS (1) | ||
| 524 | |||
| 525 | /** | ||
| 526 | * struct drm_vmw_cursor_bypass_arg | ||
| 527 | * | ||
| 528 | * @flags: Flags. | ||
| 529 | * @crtc_id: Crtc id, only used if DRM_VMW_CURSOR_BYPASS_ALL isn't passed. | ||
| 530 | * @xpos: X position of cursor. | ||
| 531 | * @ypos: Y position of cursor. | ||
| 532 | * @xhot: X hotspot. | ||
| 533 | * @yhot: Y hotspot. | ||
| 534 | * | ||
| 535 | * Argument to the DRM_VMW_CURSOR_BYPASS Ioctl. | ||
| 536 | */ | ||
| 537 | |||
| 538 | struct drm_vmw_cursor_bypass_arg { | ||
| 539 | uint32_t flags; | ||
| 540 | uint32_t crtc_id; | ||
| 541 | int32_t xpos; | ||
| 542 | int32_t ypos; | ||
| 543 | int32_t xhot; | ||
| 544 | int32_t yhot; | ||
| 545 | }; | ||
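
For example, a compositor-side sketch reporting the cursor state for all crtcs (illustrative only):

static int vmw_cursor_bypass_all(int fd, int32_t x, int32_t y,
				 int32_t xhot, int32_t yhot)
{
	struct drm_vmw_cursor_bypass_arg arg = {
		.flags = DRM_VMW_CURSOR_BYPASS_ALL,
		.xpos = x, .ypos = y,
		.xhot = xhot, .yhot = yhot,
	};

	return drmCommandWrite(fd, DRM_VMW_CURSOR_BYPASS, &arg, sizeof(arg));
}
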
| 546 | |||
| 547 | /*************************************************************************/ | ||
| 548 | /** | ||
| 549 | * DRM_VMW_CLAIM_STREAM - Claim a single stream. | ||
| 550 | */ | ||
| 551 | |||
| 552 | /** | ||
| 553 | * struct drm_vmw_stream_arg | ||
| 554 | * | ||
| 555 | * @stream_id: Device unique stream ID. | ||
| 556 | * | ||
| 557 | * Output argument to the DRM_VMW_CLAIM_STREAM Ioctl. | ||
| 558 | * Input argument to the DRM_VMW_UNREF_STREAM Ioctl. | ||
| 559 | */ | ||
| 560 | |||
| 561 | struct drm_vmw_stream_arg { | ||
| 562 | uint32_t stream_id; | ||
| 563 | uint32_t pad64; | ||
| 564 | }; | ||
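
A claim/release sketch for completeness; the same struct is the input to DRM_VMW_UNREF_STREAM described below (helpers hypothetical):

static int vmw_claim_stream(int fd, uint32_t *stream_id)
{
	struct drm_vmw_stream_arg arg = { 0 };
	int ret;

	ret = drmCommandWriteRead(fd, DRM_VMW_CLAIM_STREAM, &arg, sizeof(arg));
	if (ret == 0)
		*stream_id = arg.stream_id;
	return ret;
}

static int vmw_unref_stream(int fd, uint32_t stream_id)
{
	struct drm_vmw_stream_arg arg = { .stream_id = stream_id };

	return drmCommandWrite(fd, DRM_VMW_UNREF_STREAM, &arg, sizeof(arg));
}
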
| 565 | |||
| 566 | /*************************************************************************/ | ||
| 567 | /** | ||
| 568 | * DRM_VMW_UNREF_STREAM - Unclaim a stream. | ||
| 569 | * | ||
| 570 | * Return a single stream that was claimed by this process. Also makes | ||
| 571 | * sure that the stream has been stopped. | ||
| 572 | */ | ||
| 573 | |||
| 574 | #endif | ||
