Diffstat:
 drivers/gpu/drm/vmwgfx/Makefile           |   3
 drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c    |  26
 drivers/gpu/drm/vmwgfx/vmwgfx_drv.c       |  28
 drivers/gpu/drm/vmwgfx/vmwgfx_drv.h       |  16
 drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c   | 112
 drivers/gpu/drm/vmwgfx/vmwgfx_fence.c     | 619
 drivers/gpu/drm/vmwgfx/vmwgfx_fence.h     | 105
 drivers/gpu/drm/vmwgfx/vmwgfx_irq.c       |  35
 drivers/gpu/drm/vmwgfx/vmwgfx_resource.c  |   4
 include/drm/vmwgfx_drm.h                  | 149
 10 files changed, 1010 insertions(+), 87 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index f41e8b499978..7d8e9d5d498c 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -4,6 +4,7 @@ ccflags-y := -Iinclude/drm
 vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
	    vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \
	    vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
-	    vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o
+	    vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
+	    vmwgfx_fence.o
 
 obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 72d95617bc59..5d665ce8cbe4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -274,39 +274,39 @@ static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
 
 static void *vmw_sync_obj_ref(void *sync_obj)
 {
-	return sync_obj;
+
+	return (void *)
+		vmw_fence_obj_reference((struct vmw_fence_obj *) sync_obj);
 }
 
 static void vmw_sync_obj_unref(void **sync_obj)
 {
-	*sync_obj = NULL;
+	vmw_fence_obj_unreference((struct vmw_fence_obj **) sync_obj);
 }
 
 static int vmw_sync_obj_flush(void *sync_obj, void *sync_arg)
 {
-	struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;
-
-	mutex_lock(&dev_priv->hw_mutex);
-	vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
-	mutex_unlock(&dev_priv->hw_mutex);
+	vmw_fence_obj_flush((struct vmw_fence_obj *) sync_obj);
 	return 0;
 }
 
 static bool vmw_sync_obj_signaled(void *sync_obj, void *sync_arg)
 {
-	struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;
-	uint32_t seqno = (unsigned long) sync_obj;
+	unsigned long flags = (unsigned long) sync_arg;
+	return vmw_fence_obj_signaled((struct vmw_fence_obj *) sync_obj,
+				      (uint32_t) flags);
 
-	return vmw_seqno_passed(dev_priv, seqno);
 }
 
 static int vmw_sync_obj_wait(void *sync_obj, void *sync_arg,
 			     bool lazy, bool interruptible)
 {
-	struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;
-	uint32_t seqno = (unsigned long) sync_obj;
+	unsigned long flags = (unsigned long) sync_arg;
 
-	return vmw_wait_seqno(dev_priv, false, seqno, false, 3*HZ);
+	return vmw_fence_obj_wait((struct vmw_fence_obj *) sync_obj,
+				  (uint32_t) flags,
+				  lazy, interruptible,
+				  VMW_FENCE_WAIT_TIMEOUT);
 }
 
 struct ttm_bo_driver vmw_bo_driver = {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 4f65f1e34b8f..d4829cbf326d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -82,12 +82,18 @@
 #define DRM_IOCTL_VMW_EXECBUF \
 	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF, \
 		struct drm_vmw_execbuf_arg)
+#define DRM_IOCTL_VMW_GET_3D_CAP \
+	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP, \
+		struct drm_vmw_get_3d_cap_arg)
 #define DRM_IOCTL_VMW_FENCE_WAIT \
 	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \
 		 struct drm_vmw_fence_wait_arg)
-#define DRM_IOCTL_VMW_GET_3D_CAP \
-	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP, \
-		struct drm_vmw_get_3d_cap_arg)
+#define DRM_IOCTL_VMW_FENCE_SIGNALED \
+	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED, \
+		 struct drm_vmw_fence_signaled_arg)
+#define DRM_IOCTL_VMW_FENCE_UNREF \
+	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF, \
+		struct drm_vmw_fence_arg)
 
 /**
  * The core DRM version of this macro doesn't account for
@@ -131,7 +137,12 @@ static struct drm_ioctl_desc vmw_ioctls[] = {
 		      DRM_AUTH | DRM_UNLOCKED),
 	VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
 		      DRM_AUTH | DRM_UNLOCKED),
-	VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_wait_ioctl,
+	VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
+		      DRM_AUTH | DRM_UNLOCKED),
+	VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
+		      vmw_fence_obj_signaled_ioctl,
+		      DRM_AUTH | DRM_UNLOCKED),
+	VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
 		      DRM_AUTH | DRM_UNLOCKED),
 	VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
 		      DRM_AUTH | DRM_UNLOCKED),
@@ -198,12 +209,14 @@ static int vmw_request_device(struct vmw_private *dev_priv)
 		DRM_ERROR("Unable to initialize FIFO.\n");
 		return ret;
 	}
+	vmw_fence_fifo_up(dev_priv->fman);
 
 	return 0;
 }
 
 static void vmw_release_device(struct vmw_private *dev_priv)
 {
+	vmw_fence_fifo_down(dev_priv->fman);
 	vmw_fifo_release(dev_priv, &dev_priv->fifo);
 }
 
@@ -434,6 +447,10 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 			goto out_no_device;
 		}
 	}
+
+	dev_priv->fman = vmw_fence_manager_init(dev_priv);
+	if (unlikely(dev_priv->fman == NULL))
+		goto out_no_fman;
 	ret = vmw_kms_init(dev_priv);
 	if (unlikely(ret != 0))
 		goto out_no_kms;
@@ -475,6 +492,8 @@ out_no_fifo:
 	vmw_overlay_close(dev_priv);
 	vmw_kms_close(dev_priv);
 out_no_kms:
+	vmw_fence_manager_takedown(dev_priv->fman);
+out_no_fman:
 	if (dev_priv->stealth)
 		pci_release_region(dev->pdev, 2);
 	else
@@ -518,6 +537,7 @@ static int vmw_driver_unload(struct drm_device *dev)
 	}
 	vmw_kms_close(dev_priv);
 	vmw_overlay_close(dev_priv);
+	vmw_fence_manager_takedown(dev_priv->fman);
 	if (dev_priv->stealth)
 		pci_release_region(dev->pdev, 2);
 	else
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 3018871aaaff..770f0636cee8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -38,6 +38,7 @@
 #include "ttm/ttm_lock.h"
 #include "ttm/ttm_execbuf_util.h"
 #include "ttm/ttm_module.h"
+#include "vmwgfx_fence.h"
 
 #define VMWGFX_DRIVER_DATE "20100927"
 #define VMWGFX_DRIVER_MAJOR 1
@@ -53,6 +54,11 @@
 #define VMW_PL_GMR TTM_PL_PRIV0
 #define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0
 
+#define VMW_RES_CONTEXT ttm_driver_type0
+#define VMW_RES_SURFACE ttm_driver_type1
+#define VMW_RES_STREAM ttm_driver_type2
+#define VMW_RES_FENCE ttm_driver_type3
+
 struct vmw_fpriv {
 	struct drm_master *locked_master;
 	struct ttm_object_file *tfile;
@@ -245,6 +251,7 @@ struct vmw_private {
 	atomic_t fifo_queue_waiters;
 	uint32_t last_read_seqno;
 	spinlock_t irq_lock;
+	struct vmw_fence_manager *fman;
 
 	/*
 	 * Device state
@@ -456,8 +463,6 @@ extern int vmw_irq_postinstall(struct drm_device *dev);
 extern void vmw_irq_uninstall(struct drm_device *dev);
 extern bool vmw_seqno_passed(struct vmw_private *dev_priv,
 			     uint32_t seqno);
-extern int vmw_fence_wait_ioctl(struct drm_device *dev, void *data,
-				struct drm_file *file_priv);
 extern int vmw_fallback_wait(struct vmw_private *dev_priv,
 			     bool lazy,
 			     bool fifo_idle,
@@ -466,7 +471,8 @@ extern int vmw_fallback_wait(struct vmw_private *dev_priv,
 			     unsigned long timeout);
 extern void vmw_update_seqno(struct vmw_private *dev_priv,
 			     struct vmw_fifo_state *fifo_state);
-
+extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv);
+extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
 
 /**
  * Rudimentary fence-like objects currently used only for throttling -
@@ -572,4 +578,8 @@ static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer
 	return NULL;
 }
 
+static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv)
+{
+	return (struct ttm_mem_global *) dev_priv->mem_global_ref.object;
+}
 #endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index be41484735b1..d48ee89a5190 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -256,7 +256,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
 		val_buf = &sw_context->val_bufs[cur_validate_node];
 		val_buf->bo = ttm_bo_reference(bo);
 		val_buf->usage = TTM_USAGE_READWRITE;
-		val_buf->new_sync_obj_arg = (void *) dev_priv;
+		val_buf->new_sync_obj_arg = (void *) DRM_VMW_FENCE_FLAG_EXEC;
 		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
 		++sw_context->cur_val_buf;
 	}
@@ -321,7 +321,6 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
 	return 0;
 }
 
-
 static int vmw_cmd_dma(struct vmw_private *dev_priv,
 		       struct vmw_sw_context *sw_context,
 		       SVGA3dCmdHeader *header)
@@ -676,6 +675,50 @@ static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
 	return 0;
 }
 
+/**
+ * vmw_execbuf_fence_commands - create and submit a command stream fence
+ *
+ * Creates a fence object and submits a command stream marker.
+ * If this fails for some reason, We sync the fifo and return NULL.
+ * It is then safe to fence buffers with a NULL pointer.
+ */
+
+int vmw_execbuf_fence_commands(struct drm_file *file_priv,
+			       struct vmw_private *dev_priv,
+			       struct vmw_fence_obj **p_fence,
+			       uint32_t *p_handle)
+{
+	uint32_t sequence;
+	int ret;
+	bool synced = false;
+
+
+	ret = vmw_fifo_send_fence(dev_priv, &sequence);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Fence submission error. Syncing.\n");
+		synced = true;
+	}
+
+	if (p_handle != NULL)
+		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
+					    sequence,
+					    DRM_VMW_FENCE_FLAG_EXEC,
+					    p_fence, p_handle);
+	else
+		ret = vmw_fence_create(dev_priv->fman, sequence,
+				       DRM_VMW_FENCE_FLAG_EXEC,
+				       p_fence);
+
+	if (unlikely(ret != 0 && !synced)) {
+		(void) vmw_fallback_wait(dev_priv, false, false,
+					 sequence, false,
+					 VMW_FENCE_WAIT_TIMEOUT);
+		*p_fence = NULL;
+	}
+
+	return 0;
+}
+
 int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
 		      struct drm_file *file_priv)
 {
@@ -686,9 +729,10 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
 	int ret;
 	void *user_cmd;
 	void *cmd;
-	uint32_t seqno;
 	struct vmw_sw_context *sw_context = &dev_priv->ctx;
 	struct vmw_master *vmaster = vmw_master(file_priv->master);
+	struct vmw_fence_obj *fence;
+	uint32_t handle;
 
 	ret = ttm_read_lock(&vmaster->lock, true);
 	if (unlikely(ret != 0))
@@ -755,34 +799,60 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
 	memcpy(cmd, sw_context->cmd_bounce, arg->command_size);
 	vmw_fifo_commit(dev_priv, arg->command_size);
 
-	ret = vmw_fifo_send_fence(dev_priv, &seqno);
-
-	ttm_eu_fence_buffer_objects(&sw_context->validate_nodes,
-				    (void *)(unsigned long) seqno);
-	vmw_clear_validations(sw_context);
-	mutex_unlock(&dev_priv->cmdbuf_mutex);
-
+	user_fence_rep = (struct drm_vmw_fence_rep __user *)
+		(unsigned long)arg->fence_rep;
+	ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
+					 &fence,
+					 (user_fence_rep) ? &handle : NULL);
 	/*
 	 * This error is harmless, because if fence submission fails,
-	 * vmw_fifo_send_fence will sync.
+	 * vmw_fifo_send_fence will sync. The error will be propagated to
+	 * user-space in @fence_rep
 	 */
 
 	if (ret != 0)
 		DRM_ERROR("Fence submission error. Syncing.\n");
 
-	fence_rep.error = ret;
-	fence_rep.fence_seq = (uint64_t) seqno;
-	fence_rep.pad64 = 0;
+	ttm_eu_fence_buffer_objects(&sw_context->validate_nodes,
+				    (void *) fence);
 
-	user_fence_rep = (struct drm_vmw_fence_rep __user *)
-	    (unsigned long)arg->fence_rep;
+	vmw_clear_validations(sw_context);
+	mutex_unlock(&dev_priv->cmdbuf_mutex);
 
-	/*
-	 * copy_to_user errors will be detected by user space not
-	 * seeing fence_rep::error filled in.
-	 */
+	if (user_fence_rep) {
+		fence_rep.error = ret;
+		fence_rep.handle = handle;
+		fence_rep.seqno = fence->seqno;
+		vmw_update_seqno(dev_priv, &dev_priv->fifo);
+		fence_rep.passed_seqno = dev_priv->last_read_seqno;
+
+		/*
+		 * copy_to_user errors will be detected by user space not
+		 * seeing fence_rep::error filled in. Typically
+		 * user-space would have pre-set that member to -EFAULT.
+		 */
+		ret = copy_to_user(user_fence_rep, &fence_rep,
+				   sizeof(fence_rep));
+
+		/*
+		 * User-space lost the fence object. We need to sync
+		 * and unreference the handle.
+		 */
+		if (unlikely(ret != 0) && (fence_rep.error == 0)) {
+			BUG_ON(fence == NULL);
+
+			ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
+						  handle, TTM_REF_USAGE);
+			DRM_ERROR("Fence copy error. Syncing.\n");
+			(void) vmw_fence_obj_wait(fence,
+						  fence->signal_mask,
+						  false, false,
+						  VMW_FENCE_WAIT_TIMEOUT);
+		}
+	}
 
-	ret = copy_to_user(user_fence_rep, &fence_rep, sizeof(fence_rep));
+	if (likely(fence != NULL))
+		vmw_fence_obj_unreference(&fence);
 
 	vmw_kms_cursor_post_execbuf(dev_priv);
 	ttm_read_unlock(&vmaster->lock);
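A note on the fallback path added above: vmw_execbuf_fence_commands() always returns 0; if fence creation fails it synchronizes via vmw_fallback_wait() and hands back a NULL fence, which is why the function comment says it is safe to fence buffers with a NULL pointer. The hypothetical, condensed caller below (the example_* name is not part of the patch) just restates the calling pattern the execbuf ioctl uses:

/* Hypothetical condensed caller mirroring the execbuf ioctl above. */
static void example_submit_fencing(struct drm_file *file_priv,
				   struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context)
{
	struct vmw_fence_obj *fence = NULL;
	uint32_t handle;

	/* Never fails; on error it syncs the fifo and leaves fence == NULL. */
	(void) vmw_execbuf_fence_commands(file_priv, dev_priv,
					  &fence, &handle);

	/* Fencing with a NULL pointer is safe here, per the comment above. */
	ttm_eu_fence_buffer_objects(&sw_context->validate_nodes,
				    (void *) fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);
}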
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
new file mode 100644
index 000000000000..5065a140fdf8
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -0,0 +1,619 @@
1/**************************************************************************
2 *
3 * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#include "drmP.h"
29#include "vmwgfx_drv.h"
30
31#define VMW_FENCE_WRAP (1 << 31)
32
33struct vmw_fence_manager {
34 int num_fence_objects;
35 struct vmw_private *dev_priv;
36 spinlock_t lock;
37 u32 next_seqno;
38 struct list_head fence_list;
39 struct work_struct work;
40 u32 user_fence_size;
41 u32 fence_size;
42 bool fifo_down;
43 struct list_head cleanup_list;
44};
45
46struct vmw_user_fence {
47 struct ttm_base_object base;
48 struct vmw_fence_obj fence;
49};
50
51/**
52 * vmw_fence_destroy_locked
53 *
54 */
55
56static void vmw_fence_obj_destroy_locked(struct kref *kref)
57{
58 struct vmw_fence_obj *fence =
59 container_of(kref, struct vmw_fence_obj, kref);
60
61 struct vmw_fence_manager *fman = fence->fman;
62 unsigned int num_fences;
63
64 list_del_init(&fence->head);
65 num_fences = --fman->num_fence_objects;
66 spin_unlock_irq(&fman->lock);
67 if (fence->destroy)
68 fence->destroy(fence);
69 else
70 kfree(fence);
71
72 spin_lock_irq(&fman->lock);
73}
74
75
76/**
77 * Execute signal actions on fences recently signaled.
78 * This is done from a workqueue so we don't have to execute
79 * signal actions from atomic context.
80 */
81
82static void vmw_fence_work_func(struct work_struct *work)
83{
84 struct vmw_fence_manager *fman =
85 container_of(work, struct vmw_fence_manager, work);
86 struct list_head list;
87 struct vmw_fence_action *action, *next_action;
88
89 do {
90 INIT_LIST_HEAD(&list);
91 spin_lock_irq(&fman->lock);
92 list_splice_init(&fman->cleanup_list, &list);
93 spin_unlock_irq(&fman->lock);
94
95 if (list_empty(&list))
96 return;
97
98 /*
99 * At this point, only we should be able to manipulate the
100 * list heads of the actions we have on the private list.
101 */
102
103 list_for_each_entry_safe(action, next_action, &list, head) {
104 list_del_init(&action->head);
105 action->cleanup(action);
106 }
107 } while (1);
108}
109
110struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
111{
112 struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);
113
114 if (unlikely(fman == NULL))
115 return NULL;
116
117 fman->dev_priv = dev_priv;
118 spin_lock_init(&fman->lock);
119 INIT_LIST_HEAD(&fman->fence_list);
120 INIT_LIST_HEAD(&fman->cleanup_list);
121 INIT_WORK(&fman->work, &vmw_fence_work_func);
122 fman->fifo_down = true;
123 fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence));
124 fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
125
126 return fman;
127}
128
129void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
130{
131 unsigned long irq_flags;
132 bool lists_empty;
133
134 (void) cancel_work_sync(&fman->work);
135
136 spin_lock_irqsave(&fman->lock, irq_flags);
137 lists_empty = list_empty(&fman->fence_list) &&
138 list_empty(&fman->cleanup_list);
139 spin_unlock_irqrestore(&fman->lock, irq_flags);
140
141 BUG_ON(!lists_empty);
142 kfree(fman);
143}
144
145static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
146 struct vmw_fence_obj *fence,
147 u32 seqno,
148 uint32_t mask,
149 void (*destroy) (struct vmw_fence_obj *fence))
150{
151 unsigned long irq_flags;
152 unsigned int num_fences;
153 int ret = 0;
154
155 fence->seqno = seqno;
156 INIT_LIST_HEAD(&fence->seq_passed_actions);
157 fence->fman = fman;
158 fence->signaled = 0;
159 fence->signal_mask = mask;
160 kref_init(&fence->kref);
161 fence->destroy = destroy;
162 init_waitqueue_head(&fence->queue);
163
164 spin_lock_irqsave(&fman->lock, irq_flags);
165 if (unlikely(fman->fifo_down)) {
166 ret = -EBUSY;
167 goto out_unlock;
168 }
169 list_add_tail(&fence->head, &fman->fence_list);
170 num_fences = ++fman->num_fence_objects;
171
172out_unlock:
173 spin_unlock_irqrestore(&fman->lock, irq_flags);
174 return ret;
175
176}
177
178struct vmw_fence_obj *vmw_fence_obj_reference(struct vmw_fence_obj *fence)
179{
180 kref_get(&fence->kref);
181 return fence;
182}
183
184/**
185 * vmw_fence_obj_unreference
186 *
187 * Note that this function may not be entered with disabled irqs since
188 * it may re-enable them in the destroy function.
189 *
190 */
191void vmw_fence_obj_unreference(struct vmw_fence_obj **fence_p)
192{
193 struct vmw_fence_obj *fence = *fence_p;
194 struct vmw_fence_manager *fman = fence->fman;
195
196 *fence_p = NULL;
197 spin_lock_irq(&fman->lock);
198 BUG_ON(atomic_read(&fence->kref.refcount) == 0);
199 kref_put(&fence->kref, vmw_fence_obj_destroy_locked);
200 spin_unlock_irq(&fman->lock);
201}
202
203void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
204 struct list_head *list)
205{
206 struct vmw_fence_action *action, *next_action;
207
208 list_for_each_entry_safe(action, next_action, list, head) {
209 list_del_init(&action->head);
210 if (action->seq_passed != NULL)
211 action->seq_passed(action);
212
213 /*
214 * Add the cleanup action to the cleanup list so that
215 * it will be performed by a worker task.
216 */
217
218 if (action->cleanup != NULL)
219 list_add_tail(&action->head, &fman->cleanup_list);
220 }
221}
222
223void vmw_fences_update(struct vmw_fence_manager *fman, u32 seqno)
224{
225 unsigned long flags;
226 struct vmw_fence_obj *fence, *next_fence;
227 struct list_head action_list;
228
229 spin_lock_irqsave(&fman->lock, flags);
230 list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
231 if (seqno - fence->seqno < VMW_FENCE_WRAP) {
232 list_del_init(&fence->head);
233 fence->signaled |= DRM_VMW_FENCE_FLAG_EXEC;
234 INIT_LIST_HEAD(&action_list);
235 list_splice_init(&fence->seq_passed_actions,
236 &action_list);
237 vmw_fences_perform_actions(fman, &action_list);
238 wake_up_all(&fence->queue);
239 }
240
241 }
242 if (!list_empty(&fman->cleanup_list))
243 (void) schedule_work(&fman->work);
244 spin_unlock_irqrestore(&fman->lock, flags);
245}
246
247
248bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence,
249 uint32_t flags)
250{
251 struct vmw_fence_manager *fman = fence->fman;
252 unsigned long irq_flags;
253 uint32_t signaled;
254
255 spin_lock_irqsave(&fman->lock, irq_flags);
256 signaled = fence->signaled;
257 spin_unlock_irqrestore(&fman->lock, irq_flags);
258
259 flags &= fence->signal_mask;
260 if ((signaled & flags) == flags)
261 return 1;
262
263 if ((signaled & DRM_VMW_FENCE_FLAG_EXEC) == 0) {
264 struct vmw_private *dev_priv = fman->dev_priv;
265 __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
266 u32 seqno;
267
268 seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
269 vmw_fences_update(fman, seqno);
270 }
271
272 spin_lock_irqsave(&fman->lock, irq_flags);
273 signaled = fence->signaled;
274 spin_unlock_irqrestore(&fman->lock, irq_flags);
275
276 return ((signaled & flags) == flags);
277}
278
279int vmw_fence_obj_wait(struct vmw_fence_obj *fence,
280 uint32_t flags, bool lazy,
281 bool interruptible, unsigned long timeout)
282{
283 struct vmw_private *dev_priv = fence->fman->dev_priv;
284 long ret;
285
286 if (likely(vmw_fence_obj_signaled(fence, flags)))
287 return 0;
288
289 vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
290 vmw_seqno_waiter_add(dev_priv);
291
292 if (interruptible)
293 ret = wait_event_interruptible_timeout
294 (fence->queue,
295 vmw_fence_obj_signaled(fence, flags),
296 timeout);
297 else
298 ret = wait_event_timeout
299 (fence->queue,
300 vmw_fence_obj_signaled(fence, flags),
301 timeout);
302
303 vmw_seqno_waiter_remove(dev_priv);
304
305 if (unlikely(ret == 0))
306 ret = -EBUSY;
307 else if (likely(ret > 0))
308 ret = 0;
309
310 return ret;
311}
312
313void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
314{
315 struct vmw_private *dev_priv = fence->fman->dev_priv;
316
317 vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
318}
319
320static void vmw_fence_destroy(struct vmw_fence_obj *fence)
321{
322 struct vmw_fence_manager *fman = fence->fman;
323
324 kfree(fence);
325 /*
326 * Free kernel space accounting.
327 */
328 ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
329 fman->fence_size);
330}
331
332int vmw_fence_create(struct vmw_fence_manager *fman,
333 uint32_t seqno,
334 uint32_t mask,
335 struct vmw_fence_obj **p_fence)
336{
337 struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
338 struct vmw_fence_obj *fence;
339 int ret;
340
341 ret = ttm_mem_global_alloc(mem_glob, fman->fence_size,
342 false, false);
343 if (unlikely(ret != 0))
344 return ret;
345
346 fence = kzalloc(sizeof(*fence), GFP_KERNEL);
347 if (unlikely(fence == NULL)) {
348 ret = -ENOMEM;
349 goto out_no_object;
350 }
351
352 ret = vmw_fence_obj_init(fman, fence, seqno, mask,
353 vmw_fence_destroy);
354 if (unlikely(ret != 0))
355 goto out_err_init;
356
357 *p_fence = fence;
358 return 0;
359
360out_err_init:
361 kfree(fence);
362out_no_object:
363 ttm_mem_global_free(mem_glob, fman->fence_size);
364 return ret;
365}
366
367
368static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
369{
370 struct vmw_user_fence *ufence =
371 container_of(fence, struct vmw_user_fence, fence);
372 struct vmw_fence_manager *fman = fence->fman;
373
374 kfree(ufence);
375 /*
376 * Free kernel space accounting.
377 */
378 ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
379 fman->user_fence_size);
380}
381
382static void vmw_user_fence_base_release(struct ttm_base_object **p_base)
383{
384 struct ttm_base_object *base = *p_base;
385 struct vmw_user_fence *ufence =
386 container_of(base, struct vmw_user_fence, base);
387 struct vmw_fence_obj *fence = &ufence->fence;
388
389 *p_base = NULL;
390 vmw_fence_obj_unreference(&fence);
391}
392
393int vmw_user_fence_create(struct drm_file *file_priv,
394 struct vmw_fence_manager *fman,
395 uint32_t seqno,
396 uint32_t mask,
397 struct vmw_fence_obj **p_fence,
398 uint32_t *p_handle)
399{
400 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
401 struct vmw_user_fence *ufence;
402 struct vmw_fence_obj *tmp;
403 struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
404 int ret;
405
406 /*
407 * Kernel memory space accounting, since this object may
408 * be created by a user-space request.
409 */
410
411 ret = ttm_mem_global_alloc(mem_glob, fman->user_fence_size,
412 false, false);
413 if (unlikely(ret != 0))
414 return ret;
415
416 ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
417 if (unlikely(ufence == NULL)) {
418 ret = -ENOMEM;
419 goto out_no_object;
420 }
421
422 ret = vmw_fence_obj_init(fman, &ufence->fence, seqno,
423 mask, vmw_user_fence_destroy);
424 if (unlikely(ret != 0)) {
425 kfree(ufence);
426 goto out_no_object;
427 }
428
429 /*
430 * The base object holds a reference which is freed in
431 * vmw_user_fence_base_release.
432 */
433 tmp = vmw_fence_obj_reference(&ufence->fence);
434 ret = ttm_base_object_init(tfile, &ufence->base, false,
435 VMW_RES_FENCE,
436 &vmw_user_fence_base_release, NULL);
437
438
439 if (unlikely(ret != 0)) {
440 /*
441 * Free the base object's reference
442 */
443 vmw_fence_obj_unreference(&tmp);
444 goto out_err;
445 }
446
447 *p_fence = &ufence->fence;
448 *p_handle = ufence->base.hash.key;
449
450 return 0;
451out_err:
452 tmp = &ufence->fence;
453 vmw_fence_obj_unreference(&tmp);
454out_no_object:
455 ttm_mem_global_free(mem_glob, fman->user_fence_size);
456 return ret;
457}
458
459
460/**
461 * vmw_fence_fifo_down - signal all unsignaled fence objects.
462 */
463
464void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
465{
466 unsigned long irq_flags;
467 struct list_head action_list;
468 int ret;
469
470 /*
471 * The list may be altered while we traverse it, so always
472 * restart when we've released the fman->lock.
473 */
474
475 spin_lock_irqsave(&fman->lock, irq_flags);
476 fman->fifo_down = true;
477 while (!list_empty(&fman->fence_list)) {
478 struct vmw_fence_obj *fence =
479 list_entry(fman->fence_list.prev, struct vmw_fence_obj,
480 head);
481 kref_get(&fence->kref);
482 spin_unlock_irq(&fman->lock);
483
484 ret = vmw_fence_obj_wait(fence, fence->signal_mask,
485 false, false,
486 VMW_FENCE_WAIT_TIMEOUT);
487
488 if (unlikely(ret != 0)) {
489 list_del_init(&fence->head);
490 fence->signaled |= DRM_VMW_FENCE_FLAG_EXEC;
491 INIT_LIST_HEAD(&action_list);
492 list_splice_init(&fence->seq_passed_actions,
493 &action_list);
494 vmw_fences_perform_actions(fman, &action_list);
495 wake_up_all(&fence->queue);
496 }
497
498 spin_lock_irq(&fman->lock);
499
500 BUG_ON(!list_empty(&fence->head));
501 kref_put(&fence->kref, vmw_fence_obj_destroy_locked);
502 }
503 spin_unlock_irqrestore(&fman->lock, irq_flags);
504}
505
506void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
507{
508 unsigned long irq_flags;
509
510 spin_lock_irqsave(&fman->lock, irq_flags);
511 fman->fifo_down = false;
512 spin_unlock_irqrestore(&fman->lock, irq_flags);
513}
514
515
516int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
517 struct drm_file *file_priv)
518{
519 struct drm_vmw_fence_wait_arg *arg =
520 (struct drm_vmw_fence_wait_arg *)data;
521 unsigned long timeout;
522 struct ttm_base_object *base;
523 struct vmw_fence_obj *fence;
524 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
525 int ret;
526 uint64_t wait_timeout = ((uint64_t)arg->timeout_us * HZ);
527
528 /*
529 * 64-bit division not present on 32-bit systems, so do an
530 * approximation. (Divide by 1000000).
531 */
532
533 wait_timeout = (wait_timeout >> 20) + (wait_timeout >> 24) -
534 (wait_timeout >> 26);
535
536 if (!arg->cookie_valid) {
537 arg->cookie_valid = 1;
538 arg->kernel_cookie = jiffies + wait_timeout;
539 }
540
541 base = ttm_base_object_lookup(tfile, arg->handle);
542 if (unlikely(base == NULL)) {
543 printk(KERN_ERR "Wait invalid fence object handle "
544 "0x%08lx.\n",
545 (unsigned long)arg->handle);
546 return -EINVAL;
547 }
548
549 fence = &(container_of(base, struct vmw_user_fence, base)->fence);
550
551 timeout = jiffies;
552 if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) {
553 ret = ((vmw_fence_obj_signaled(fence, arg->flags)) ?
554 0 : -EBUSY);
555 goto out;
556 }
557
558 timeout = (unsigned long)arg->kernel_cookie - timeout;
559
560 ret = vmw_fence_obj_wait(fence, arg->flags, arg->lazy, true, timeout);
561
562out:
563 ttm_base_object_unref(&base);
564
565 /*
566 * Optionally unref the fence object.
567 */
568
569 if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF))
570 return ttm_ref_object_base_unref(tfile, arg->handle,
571 TTM_REF_USAGE);
572 return ret;
573}
574
575int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
576 struct drm_file *file_priv)
577{
578 struct drm_vmw_fence_signaled_arg *arg =
579 (struct drm_vmw_fence_signaled_arg *) data;
580 struct ttm_base_object *base;
581 struct vmw_fence_obj *fence;
582 struct vmw_fence_manager *fman;
583 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
584 struct vmw_private *dev_priv = vmw_priv(dev);
585
586 base = ttm_base_object_lookup(tfile, arg->handle);
587 if (unlikely(base == NULL)) {
588 printk(KERN_ERR "Fence signaled invalid fence object handle "
589 "0x%08lx.\n",
590 (unsigned long)arg->handle);
591 return -EINVAL;
592 }
593
594 fence = &(container_of(base, struct vmw_user_fence, base)->fence);
595 fman = fence->fman;
596
597 arg->signaled = vmw_fence_obj_signaled(fence, arg->flags);
598 spin_lock_irq(&fman->lock);
599
600 arg->signaled_flags = fence->signaled;
601 arg->passed_seqno = dev_priv->last_read_seqno;
602 spin_unlock_irq(&fman->lock);
603
604 ttm_base_object_unref(&base);
605
606 return 0;
607}
608
609
610int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
611 struct drm_file *file_priv)
612{
613 struct drm_vmw_fence_arg *arg =
614 (struct drm_vmw_fence_arg *) data;
615
616 return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
617 arg->handle,
618 TTM_REF_USAGE);
619}
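Two pieces of arithmetic in the new file deserve a worked example. vmw_fences_update() compares 32-bit sequence numbers with the unsigned difference (seqno - fence->seqno < VMW_FENCE_WRAP), which stays correct across wrap-around, and vmw_fence_obj_wait_ioctl() approximates the 64-bit division timeout_us * HZ / 1000000 as (x >> 20) + (x >> 24) - (x >> 26), which comes out roughly 0.16% low. The stand-alone user-space check below is illustrative only and not part of the patch:

/* Stand-alone illustration; build with: cc -o fence_math fence_math.c */
#include <stdint.h>
#include <stdio.h>

#define VMW_FENCE_WRAP (1u << 31)

/* Wrap-safe "has seqno passed fence_seqno?" test, as in vmw_fences_update(). */
static int seqno_passed(uint32_t seqno, uint32_t fence_seqno)
{
	return (uint32_t)(seqno - fence_seqno) < VMW_FENCE_WRAP;
}

/* Shift-based approximation of x / 1000000 used to turn us * HZ into jiffies. */
static uint64_t approx_div_1e6(uint64_t x)
{
	return (x >> 20) + (x >> 24) - (x >> 26);
}

int main(void)
{
	/* Works across the 32-bit wrap: 5 is "after" 0xfffffffb. */
	printf("%d\n", seqno_passed(5, 0xfffffffbu));	/* prints 1 */
	printf("%d\n", seqno_passed(0xfffffffbu, 5));	/* prints 0 */

	/* 1 second at HZ=1000: exact result 1000, approximation 998. */
	printf("%llu\n", (unsigned long long)approx_div_1e6(1000000ull * 1000));
	return 0;
}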
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
new file mode 100644
index 000000000000..93074064aaf3
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
@@ -0,0 +1,105 @@
1/**************************************************************************
2 *
3 * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#ifndef _VMWGFX_FENCE_H_
29
30#define VMW_FENCE_WAIT_TIMEOUT (5*HZ)
31
32struct vmw_private;
33
34struct vmw_fence_manager;
35
36/**
37 *
38 *
39 */
40struct vmw_fence_action {
41 struct list_head head;
42 void (*seq_passed) (struct vmw_fence_action *action);
43 void (*cleanup) (struct vmw_fence_action *action);
44};
45
46struct vmw_fence_obj {
47 struct kref kref;
48 u32 seqno;
49
50 struct vmw_fence_manager *fman;
51 struct list_head head;
52 uint32_t signaled;
53 uint32_t signal_mask;
54 struct list_head seq_passed_actions;
55 void (*destroy)(struct vmw_fence_obj *fence);
56 wait_queue_head_t queue;
57};
58
59extern struct vmw_fence_manager *
60vmw_fence_manager_init(struct vmw_private *dev_priv);
61
62extern void vmw_fence_manager_takedown(struct vmw_fence_manager *fman);
63
64extern void vmw_fence_obj_unreference(struct vmw_fence_obj **fence_p);
65
66extern struct vmw_fence_obj *
67vmw_fence_obj_reference(struct vmw_fence_obj *fence);
68
69extern void vmw_fences_update(struct vmw_fence_manager *fman,
70 u32 sequence);
71
72extern bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence,
73 uint32_t flags);
74
75extern int vmw_fence_obj_wait(struct vmw_fence_obj *fence, uint32_t flags,
76 bool lazy,
77 bool interruptible, unsigned long timeout);
78
79extern void vmw_fence_obj_flush(struct vmw_fence_obj *fence);
80
81extern int vmw_fence_create(struct vmw_fence_manager *fman,
82 uint32_t seqno,
83 uint32_t mask,
84 struct vmw_fence_obj **p_fence);
85
86extern int vmw_user_fence_create(struct drm_file *file_priv,
87 struct vmw_fence_manager *fman,
88 uint32_t sequence,
89 uint32_t mask,
90 struct vmw_fence_obj **p_fence,
91 uint32_t *p_handle);
92
93extern void vmw_fence_fifo_up(struct vmw_fence_manager *fman);
94
95extern void vmw_fence_fifo_down(struct vmw_fence_manager *fman);
96
97extern int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
98 struct drm_file *file_priv);
99
100extern int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
101 struct drm_file *file_priv);
102
103extern int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
104 struct drm_file *file_priv);
105#endif /* _VMWGFX_FENCE_H_ */
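Taken together, the declarations above define the in-kernel lifecycle of a fence object: create it against a FIFO seqno, wait on or poll its flags, then drop the reference. A minimal hypothetical kernel-side consumer (not part of the patch; error paths trimmed) might look like:

/* Hypothetical kernel-side consumer of the vmwgfx_fence.h API above. */
static int example_fence_roundtrip(struct vmw_private *dev_priv)
{
	struct vmw_fence_obj *fence;
	uint32_t seqno;
	int ret;

	ret = vmw_fifo_send_fence(dev_priv, &seqno);	/* emit FIFO marker */
	if (ret)
		return ret;

	ret = vmw_fence_create(dev_priv->fman, seqno,
			       DRM_VMW_FENCE_FLAG_EXEC, &fence);
	if (ret)
		return ret;

	/* Block (interruptibly) until the EXEC flag signals or we time out. */
	ret = vmw_fence_obj_wait(fence, DRM_VMW_FENCE_FLAG_EXEC,
				 false, true, VMW_FENCE_WAIT_TIMEOUT);

	vmw_fence_obj_unreference(&fence);
	return ret;
}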
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index 13dde06b60be..a005292a8908 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -40,8 +40,13 @@ irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS)
 	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
 	spin_unlock(&dev_priv->irq_lock);
 
-	if (status & SVGA_IRQFLAG_ANY_FENCE)
+	if (status & SVGA_IRQFLAG_ANY_FENCE) {
+		__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+		uint32_t seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+
+		vmw_fences_update(dev_priv->fman, seqno);
 		wake_up_all(&dev_priv->fence_queue);
+	}
 	if (status & SVGA_IRQFLAG_FIFO_PROGRESS)
 		wake_up_all(&dev_priv->fifo_queue);
 
@@ -68,12 +73,12 @@ void vmw_update_seqno(struct vmw_private *dev_priv,
 			     struct vmw_fifo_state *fifo_state)
 {
 	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
-
 	uint32_t seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
 
 	if (dev_priv->last_read_seqno != seqno) {
 		dev_priv->last_read_seqno = seqno;
 		vmw_marker_pull(&fifo_state->marker_queue, seqno);
+		vmw_fences_update(dev_priv->fman, seqno);
 	}
 }
 
@@ -175,7 +180,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
 	return ret;
 }
 
-static void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
+void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
 {
 	mutex_lock(&dev_priv->hw_mutex);
 	if (dev_priv->fence_queue_waiters++ == 0) {
@@ -192,7 +197,7 @@ static void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
 	mutex_unlock(&dev_priv->hw_mutex);
 }
 
-static void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
+void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
 {
 	mutex_lock(&dev_priv->hw_mutex);
 	if (--dev_priv->fence_queue_waiters == 0) {
@@ -286,25 +291,3 @@ void vmw_irq_uninstall(struct drm_device *dev)
 	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
 	outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
 }
-
-#define VMW_FENCE_WAIT_TIMEOUT 3*HZ;
-
-int vmw_fence_wait_ioctl(struct drm_device *dev, void *data,
-			 struct drm_file *file_priv)
-{
-	struct drm_vmw_fence_wait_arg *arg =
-	    (struct drm_vmw_fence_wait_arg *)data;
-	unsigned long timeout;
-
-	if (!arg->cookie_valid) {
-		arg->cookie_valid = 1;
-		arg->kernel_cookie = jiffies + VMW_FENCE_WAIT_TIMEOUT;
-	}
-
-	timeout = jiffies;
-	if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie))
-		return -EBUSY;
-
-	timeout = (unsigned long)arg->kernel_cookie - timeout;
-	return vmw_wait_seqno(vmw_priv(dev), true, arg->seqno, true, timeout);
-}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 4b53803d0fa9..c1b6ffd4ce7b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -31,10 +31,6 @@
 #include "ttm/ttm_placement.h"
 #include "drmP.h"
 
-#define VMW_RES_CONTEXT ttm_driver_type0
-#define VMW_RES_SURFACE ttm_driver_type1
-#define VMW_RES_STREAM ttm_driver_type2
-
 struct vmw_user_context {
 	struct ttm_base_object base;
 	struct vmw_resource res;
diff --git a/include/drm/vmwgfx_drm.h b/include/drm/vmwgfx_drm.h
index c2b3909ac50a..763a7a3885a1 100644
--- a/include/drm/vmwgfx_drm.h
+++ b/include/drm/vmwgfx_drm.h
@@ -48,8 +48,12 @@
 #define DRM_VMW_UNREF_SURFACE        10
 #define DRM_VMW_REF_SURFACE          11
 #define DRM_VMW_EXECBUF              12
-#define DRM_VMW_FENCE_WAIT           13
-#define DRM_VMW_GET_3D_CAP           14
+#define DRM_VMW_GET_3D_CAP           13
+#define DRM_VMW_FENCE_WAIT           14
+#define DRM_VMW_FENCE_SIGNALED       15
+#define DRM_VMW_FENCE_UNREF          16
+#define DRM_VMW_FENCE_EVENT          17
+
 
 /*************************************************************************/
 /**
@@ -318,14 +322,23 @@ struct drm_vmw_execbuf_arg {
 	uint32_t command_size;
 	uint32_t throttle_us;
 	uint64_t fence_rep;
 	uint32_t version;
 	uint32_t flags;
 };
 
 /**
  * struct drm_vmw_fence_rep
  *
- * @fence_seq: Fence seqno associated with a command submission.
+ * @handle: Fence object handle for fence associated with a command submission.
+ * @mask: Fence flags relevant for this fence object.
+ * @seqno: Fence sequence number in fifo. A fence object with a lower
+ * seqno will signal the EXEC flag before a fence object with a higher
+ * seqno. This can be used by user-space to avoid kernel calls to determine
+ * whether a fence has signaled the EXEC flag. Note that @seqno will
+ * wrap at 32-bit.
+ * @passed_seqno: The highest seqno number processed by the hardware
+ * so far. This can be used to mark user-space fence objects as signaled, and
+ * to determine whether a fence seqno might be stale.
  * @error: This member should've been set to -EFAULT on submission.
  * The following actions should be take on completion:
  * error == -EFAULT: Fence communication failed. The host is synchronized.
@@ -339,9 +352,12 @@ struct drm_vmw_execbuf_arg {
  */
 
 struct drm_vmw_fence_rep {
-	uint64_t fence_seq;
-	int32_t error;
+	uint32_t handle;
+	uint32_t mask;
+	uint32_t seqno;
+	uint32_t passed_seqno;
 	uint32_t pad64;
+	int32_t error;
 };
 
 /*************************************************************************/
@@ -430,14 +446,6 @@ struct drm_vmw_unref_dmabuf_arg {
 	uint32_t pad64;
 };
 
-
-struct drm_vmw_fence_wait_arg {
-	uint64_t seqno;
-	uint64_t kernel_cookie;
-	int32_t cookie_valid;
-	int32_t pad64;
-};
-
 /*************************************************************************/
 /**
  * DRM_VMW_CONTROL_STREAM - Control overlays, aka streams.
@@ -559,6 +567,7 @@ struct drm_vmw_stream_arg {
  * Return a single stream that was claimed by this process. Also makes
  * sure that the stream has been stopped.
  */
+
 /*************************************************************************/
 /**
  * DRM_VMW_GET_3D_CAP
@@ -607,4 +616,114 @@ struct drm_vmw_update_layout_arg {
 	uint64_t rects;
 };
 
+
+/*************************************************************************/
+/**
+ * DRM_VMW_FENCE_WAIT
+ *
+ * Waits for a fence object to signal. The wait is interruptible, so that
+ * signals may be delivered during the interrupt. The wait may timeout,
+ * in which case the calls returns -EBUSY. If the wait is restarted,
+ * that is restarting without resetting @cookie_valid to zero,
+ * the timeout is computed from the first call.
+ *
+ * The flags argument to the DRM_VMW_FENCE_WAIT ioctl indicates what to wait
+ * on:
+ * DRM_VMW_FENCE_FLAG_EXEC: All commands ahead of the fence in the command
+ * stream
+ * have executed.
+ * DRM_VMW_FENCE_FLAG_QUERY: All query results resulting from query finish
+ * commands
+ * in the buffer given to the EXECBUF ioctl returning the fence object handle
+ * are available to user-space.
+ *
+ * DRM_VMW_WAIT_OPTION_UNREF: If this wait option is given, and the
+ * fenc wait ioctl returns 0, the fence object has been unreferenced after
+ * the wait.
+ */
+
+#define DRM_VMW_FENCE_FLAG_EXEC   (1 << 0)
+#define DRM_VMW_FENCE_FLAG_QUERY  (1 << 1)
+
+#define DRM_VMW_WAIT_OPTION_UNREF (1 << 0)
+
+/**
+ * struct drm_vmw_fence_wait_arg
+ *
+ * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
+ * @cookie_valid: Must be reset to 0 on first call. Left alone on restart.
+ * @kernel_cookie: Set to 0 on first call. Left alone on restart.
+ * @timeout_us: Wait timeout in microseconds. 0 for indefinite timeout.
+ * @lazy: Set to 1 if timing is not critical. Allow more than a kernel tick
+ * before returning.
+ * @flags: Fence flags to wait on.
+ * @wait_options: Options that control the behaviour of the wait ioctl.
+ *
+ * Input argument to the DRM_VMW_FENCE_WAIT ioctl.
+ */
+
+struct drm_vmw_fence_wait_arg {
+	uint32_t handle;
+	int32_t  cookie_valid;
+	uint64_t kernel_cookie;
+	uint64_t timeout_us;
+	int32_t lazy;
+	int32_t flags;
+	int32_t wait_options;
+	int32_t pad64;
+};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_FENCE_SIGNALED
+ *
+ * Checks if a fence object is signaled..
+ */
+
+/**
+ * struct drm_vmw_fence_signaled_arg
+ *
+ * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
+ * @flags: Fence object flags input to DRM_VMW_FENCE_SIGNALED ioctl
+ * @signaled: Out: Flags signaled.
+ * @sequence: Out: Highest sequence passed so far. Can be used to signal the
+ * EXEC flag of user-space fence objects.
+ *
+ * Input/Output argument to the DRM_VMW_FENCE_SIGNALED and DRM_VMW_FENCE_UNREF
+ * ioctls.
+ */
+
+struct drm_vmw_fence_signaled_arg {
+	uint32_t handle;
+	uint32_t flags;
+	int32_t signaled;
+	uint32_t passed_seqno;
+	uint32_t signaled_flags;
+	uint32_t pad64;
+};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_FENCE_UNREF
+ *
+ * Unreferences a fence object, and causes it to be destroyed if there are no
+ * other references to it.
+ *
+ */
+
+/**
+ * struct drm_vmw_fence_arg
+ *
+ * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
+ *
+ * Input/Output argument to the DRM_VMW_FENCE_UNREF ioctl..
+ */
+
+struct drm_vmw_fence_arg {
+	uint32_t handle;
+	uint32_t pad64;
+};
+
+
+
 #endif
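For completeness, a rough user-space sketch of the new fence ioctls follows. It assumes libdrm's drmCommandWriteRead()/drmCommandWrite() wrappers and a fence handle previously returned in struct drm_vmw_fence_rep by the EXECBUF ioctl; it is illustrative only and not part of the patch.

/* Illustrative user-space use of the fence ioctls declared above. */
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>		/* drmCommandWriteRead(), drmCommandWrite() */
#include <drm/vmwgfx_drm.h>	/* header path may differ per installation */

/* Wait up to one second for the EXEC flag, then drop the handle. */
static int wait_and_release_fence(int fd, uint32_t handle)
{
	struct drm_vmw_fence_wait_arg wait;
	struct drm_vmw_fence_arg unref;
	int ret;

	memset(&wait, 0, sizeof(wait));
	wait.handle = handle;
	wait.cookie_valid = 0;		/* first call: kernel fills the cookie */
	wait.timeout_us = 1000000;
	wait.flags = DRM_VMW_FENCE_FLAG_EXEC;
	wait.wait_options = DRM_VMW_WAIT_OPTION_UNREF;	/* unref on success */

	ret = drmCommandWriteRead(fd, DRM_VMW_FENCE_WAIT, &wait, sizeof(wait));
	if (ret == 0)
		return 0;		/* fence signaled and already unreferenced */

	/* Timed out or interrupted: release the handle explicitly. */
	memset(&unref, 0, sizeof(unref));
	unref.handle = handle;
	return drmCommandWrite(fd, DRM_VMW_FENCE_UNREF, &unref, sizeof(unref));
}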