author		Thomas Hellstrom <thellstrom@vmware.com>	2011-09-01 16:18:42 -0400
committer	Dave Airlie <airlied@redhat.com>	2011-09-06 06:48:40 -0400
commit		6bcd8d3c782b7b2c98c8f414a6bb43cf6b84e53c (patch)
tree		ec6f664be6a2ff92e0512494c222af6e2776130b
parent		f63f6a59d3905ac73aeeb617b27ac31516549ed9 (diff)
vmwgfx: Fix confusion caused by using "fence" in various places
This is needed before we introduce the fence objects.
Otherwise this will be even more confusing.

The plan is to use the following:

seqno: A 32-bit sequence number that may be passed in the fifo.

marker: Objects, carrying a seqno, that track fifo submission time.
They are used for fifo lag based throttling.

fence objects: Kernel space objects, possibly accessible from
user-space and carrying a 32-bit seqno together with signaled status.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Jakob Bornecrantz <jakob@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
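To make the new vocabulary concrete, here is a rough user-space sketch of the three concepts; the type and field names are illustrative only and do not appear in this patch:

#include <stdbool.h>
#include <stdint.h>

/* seqno: a bare 32-bit sequence number that may be passed in the fifo. */
typedef uint32_t demo_seqno_t;

/* marker: carries a seqno plus its fifo submission time; used for
 * fifo lag based throttling. */
struct demo_marker {
	demo_seqno_t seqno;
	uint64_t submitted_ns;
};

/* fence object: a kernel object carrying a seqno together with
 * signaled status, possibly accessible from user-space (these are
 * introduced by later patches). */
struct demo_fence {
	demo_seqno_t seqno;
	bool signaled;
};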
-rw-r--r--	drivers/gpu/drm/vmwgfx/Makefile	2
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c	8
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_drv.c	2
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_drv.h	42
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c	10
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c	26
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_irq.c	56
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_marker.c (renamed from drivers/gpu/drm/vmwgfx/vmwgfx_fence.c)	70
-rw-r--r--	include/drm/vmwgfx_drm.h	6
9 files changed, 110 insertions(+), 112 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index c9281a1b1d3b..f41e8b499978 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -4,6 +4,6 @@ ccflags-y := -Iinclude/drm
 vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
	    vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \
	    vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
-	    vmwgfx_overlay.o vmwgfx_fence.o vmwgfx_gmrid_manager.o
+	    vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o
 
 obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 87e43e0733bf..72d95617bc59 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -295,18 +295,18 @@ static int vmw_sync_obj_flush(void *sync_obj, void *sync_arg)
 static bool vmw_sync_obj_signaled(void *sync_obj, void *sync_arg)
 {
 	struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;
-	uint32_t sequence = (unsigned long) sync_obj;
+	uint32_t seqno = (unsigned long) sync_obj;
 
-	return vmw_fence_signaled(dev_priv, sequence);
+	return vmw_seqno_passed(dev_priv, seqno);
 }
 
 static int vmw_sync_obj_wait(void *sync_obj, void *sync_arg,
 			     bool lazy, bool interruptible)
 {
 	struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;
-	uint32_t sequence = (unsigned long) sync_obj;
+	uint32_t seqno = (unsigned long) sync_obj;
 
-	return vmw_wait_fence(dev_priv, false, sequence, false, 3*HZ);
+	return vmw_wait_seqno(dev_priv, false, seqno, false, 3*HZ);
 }
 
 struct ttm_bo_driver vmw_bo_driver = {
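The two TTM callbacks above rely on the fact that a 32-bit seqno fits in a void pointer: the seqno itself is stored as the opaque sync_obj. A minimal stand-alone sketch of that packing idiom (hypothetical names, not the driver's code):

#include <stdint.h>
#include <stdio.h>

static void *pack_seqno(uint32_t seqno)
{
	/* Widen through unsigned long so the cast is value-preserving. */
	return (void *)(unsigned long)seqno;
}

static uint32_t unpack_seqno(void *sync_obj)
{
	return (uint32_t)(unsigned long)sync_obj;
}

int main(void)
{
	void *sync_obj = pack_seqno(0xcafe);

	printf("%#x\n", unpack_seqno(sync_obj)); /* prints 0xcafe */
	return 0;
}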
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 8010254e9cf9..c8b5a53f140b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -280,7 +280,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 
 	dev_priv->dev = dev;
 	dev_priv->vmw_chipset = chipset;
-	dev_priv->last_read_sequence = (uint32_t) -100;
+	dev_priv->last_read_seqno = (uint32_t) -100;
 	mutex_init(&dev_priv->hw_mutex);
 	mutex_init(&dev_priv->cmdbuf_mutex);
 	mutex_init(&dev_priv->release_mutex);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 2374a5c495f2..9c3016b53eac 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -105,7 +105,7 @@ struct vmw_surface {
 	struct vmw_cursor_snooper snooper;
 };
 
-struct vmw_fence_queue {
+struct vmw_marker_queue {
 	struct list_head head;
 	struct timespec lag;
 	struct timespec lag_time;
@@ -121,7 +121,7 @@ struct vmw_fifo_state {
 	uint32_t capabilities;
 	struct mutex fifo_mutex;
 	struct rw_semaphore rwsem;
-	struct vmw_fence_queue fence_queue;
+	struct vmw_marker_queue marker_queue;
 };
 
 struct vmw_relocation {
@@ -238,12 +238,12 @@ struct vmw_private {
 	 * Fencing and IRQs.
 	 */
 
-	atomic_t fence_seq;
+	atomic_t marker_seq;
 	wait_queue_head_t fence_queue;
 	wait_queue_head_t fifo_queue;
 	atomic_t fence_queue_waiters;
 	atomic_t fifo_queue_waiters;
-	uint32_t last_read_sequence;
+	uint32_t last_read_seqno;
 	spinlock_t irq_lock;
 
 	/*
@@ -411,7 +411,7 @@ extern void vmw_fifo_release(struct vmw_private *dev_priv,
 extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes);
 extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
 extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
-			       uint32_t *sequence);
+			       uint32_t *seqno);
 extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
 extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
 extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
@@ -448,39 +448,39 @@ extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
  */
 
 extern irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS);
-extern int vmw_wait_fence(struct vmw_private *dev_priv, bool lazy,
-			  uint32_t sequence, bool interruptible,
+extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
+			  uint32_t seqno, bool interruptible,
 			  unsigned long timeout);
 extern void vmw_irq_preinstall(struct drm_device *dev);
 extern int vmw_irq_postinstall(struct drm_device *dev);
 extern void vmw_irq_uninstall(struct drm_device *dev);
-extern bool vmw_fence_signaled(struct vmw_private *dev_priv,
-			       uint32_t sequence);
+extern bool vmw_seqno_passed(struct vmw_private *dev_priv,
+			     uint32_t seqno);
 extern int vmw_fence_wait_ioctl(struct drm_device *dev, void *data,
 				struct drm_file *file_priv);
 extern int vmw_fallback_wait(struct vmw_private *dev_priv,
 			     bool lazy,
 			     bool fifo_idle,
-			     uint32_t sequence,
+			     uint32_t seqno,
 			     bool interruptible,
 			     unsigned long timeout);
-extern void vmw_update_sequence(struct vmw_private *dev_priv,
-				struct vmw_fifo_state *fifo_state);
+extern void vmw_update_seqno(struct vmw_private *dev_priv,
+			     struct vmw_fifo_state *fifo_state);
 
 
 /**
- * Rudimentary fence objects currently used only for throttling -
- * vmwgfx_fence.c
+ * Rudimentary fence-like objects currently used only for throttling -
+ * vmwgfx_marker.c
  */
 
-extern void vmw_fence_queue_init(struct vmw_fence_queue *queue);
-extern void vmw_fence_queue_takedown(struct vmw_fence_queue *queue);
-extern int vmw_fence_push(struct vmw_fence_queue *queue,
-			  uint32_t sequence);
-extern int vmw_fence_pull(struct vmw_fence_queue *queue,
-			  uint32_t signaled_sequence);
+extern void vmw_marker_queue_init(struct vmw_marker_queue *queue);
+extern void vmw_marker_queue_takedown(struct vmw_marker_queue *queue);
+extern int vmw_marker_push(struct vmw_marker_queue *queue,
+			   uint32_t seqno);
+extern int vmw_marker_pull(struct vmw_marker_queue *queue,
+			   uint32_t signaled_seqno);
 extern int vmw_wait_lag(struct vmw_private *dev_priv,
-			struct vmw_fence_queue *queue, uint32_t us);
+			struct vmw_marker_queue *queue, uint32_t us);
 
 /**
  * Kernel framebuffer - vmwgfx_fb.c
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index c6ff0e40f201..be41484735b1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -686,7 +686,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
 	int ret;
 	void *user_cmd;
 	void *cmd;
-	uint32_t sequence;
+	uint32_t seqno;
 	struct vmw_sw_context *sw_context = &dev_priv->ctx;
 	struct vmw_master *vmaster = vmw_master(file_priv->master);
 
@@ -738,7 +738,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
 	vmw_apply_relocations(sw_context);
 
 	if (arg->throttle_us) {
-		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.fence_queue,
+		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
 				   arg->throttle_us);
 
 		if (unlikely(ret != 0))
@@ -755,10 +755,10 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
 	memcpy(cmd, sw_context->cmd_bounce, arg->command_size);
 	vmw_fifo_commit(dev_priv, arg->command_size);
 
-	ret = vmw_fifo_send_fence(dev_priv, &sequence);
+	ret = vmw_fifo_send_fence(dev_priv, &seqno);
 
 	ttm_eu_fence_buffer_objects(&sw_context->validate_nodes,
-				    (void *)(unsigned long) sequence);
+				    (void *)(unsigned long) seqno);
 	vmw_clear_validations(sw_context);
 	mutex_unlock(&dev_priv->cmdbuf_mutex);
 
@@ -771,7 +771,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
 	DRM_ERROR("Fence submission error. Syncing.\n");
 
 	fence_rep.error = ret;
-	fence_rep.fence_seq = (uint64_t) sequence;
+	fence_rep.fence_seq = (uint64_t) seqno;
 	fence_rep.pad64 = 0;
 
 	user_fence_rep = (struct drm_vmw_fence_rep __user *)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index aae01b9ae4dc..3ba9cac579e0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -127,9 +127,9 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 		 (unsigned int) min,
 		 (unsigned int) fifo->capabilities);
 
-	atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
-	iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
-	vmw_fence_queue_init(&fifo->fence_queue);
+	atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
+	iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
+	vmw_marker_queue_init(&fifo->marker_queue);
 	return vmw_fifo_send_fence(dev_priv, &dummy);
 }
 
135 135
@@ -156,7 +156,7 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 	while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
 		vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
 
-	dev_priv->last_read_sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+	dev_priv->last_read_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
 
 	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE,
 		  dev_priv->config_done_state);
@@ -166,7 +166,7 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 		  dev_priv->traces_state);
 
 	mutex_unlock(&dev_priv->hw_mutex);
-	vmw_fence_queue_takedown(&fifo->fence_queue);
+	vmw_marker_queue_takedown(&fifo->marker_queue);
 
 	if (likely(fifo->static_buffer != NULL)) {
 		vfree(fifo->static_buffer);
@@ -447,7 +447,7 @@ void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
 	mutex_unlock(&fifo_state->fifo_mutex);
 }
 
-int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
+int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
 {
 	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
 	struct svga_fifo_cmd_fence *cmd_fence;
@@ -457,16 +457,16 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
 
 	fm = vmw_fifo_reserve(dev_priv, bytes);
 	if (unlikely(fm == NULL)) {
-		*sequence = atomic_read(&dev_priv->fence_seq);
+		*seqno = atomic_read(&dev_priv->marker_seq);
 		ret = -ENOMEM;
-		(void)vmw_fallback_wait(dev_priv, false, true, *sequence,
+		(void)vmw_fallback_wait(dev_priv, false, true, *seqno,
 					false, 3*HZ);
 		goto out_err;
 	}
 
 	do {
-		*sequence = atomic_add_return(1, &dev_priv->fence_seq);
-	} while (*sequence == 0);
+		*seqno = atomic_add_return(1, &dev_priv->marker_seq);
+	} while (*seqno == 0);
 
 	if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
 
@@ -483,10 +483,10 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
 	cmd_fence = (struct svga_fifo_cmd_fence *)
 		((unsigned long)fm + sizeof(__le32));
 
-	iowrite32(*sequence, &cmd_fence->fence);
+	iowrite32(*seqno, &cmd_fence->fence);
 	vmw_fifo_commit(dev_priv, bytes);
-	(void) vmw_fence_push(&fifo_state->fence_queue, *sequence);
-	vmw_update_sequence(dev_priv, fifo_state);
+	(void) vmw_marker_push(&fifo_state->marker_queue, *seqno);
+	vmw_update_seqno(dev_priv, fifo_state);
 
 out_err:
 	return ret;
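vmw_fifo_send_fence() above allocates seqnos with an atomic_add_return() loop that skips zero, so zero stays reserved even after the 32-bit counter wraps. A stand-alone sketch of the same idiom (demo code, not from the patch):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint32_t marker_seq = UINT32_MAX; /* one step from wrapping */

static uint32_t next_seqno(void)
{
	uint32_t seqno;

	do {
		seqno = atomic_fetch_add(&marker_seq, 1) + 1;
	} while (seqno == 0); /* keep 0 reserved */

	return seqno;
}

int main(void)
{
	printf("%u\n", next_seqno()); /* wraps past 0 and prints 1 */
	return 0;
}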
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index e92298a6a383..48701d2c8c0f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -53,7 +53,7 @@ irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS)
 		return IRQ_NONE;
 }
 
-static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t sequence)
+static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
 {
 	uint32_t busy;
 
@@ -64,43 +64,43 @@ static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t sequence)
 	return (busy == 0);
 }
 
-void vmw_update_sequence(struct vmw_private *dev_priv,
-			 struct vmw_fifo_state *fifo_state)
+void vmw_update_seqno(struct vmw_private *dev_priv,
+		      struct vmw_fifo_state *fifo_state)
 {
 	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
 
-	uint32_t sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+	uint32_t seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
 
-	if (dev_priv->last_read_sequence != sequence) {
-		dev_priv->last_read_sequence = sequence;
-		vmw_fence_pull(&fifo_state->fence_queue, sequence);
+	if (dev_priv->last_read_seqno != seqno) {
+		dev_priv->last_read_seqno = seqno;
+		vmw_marker_pull(&fifo_state->marker_queue, seqno);
 	}
 }
 
-bool vmw_fence_signaled(struct vmw_private *dev_priv,
-			uint32_t sequence)
+bool vmw_seqno_passed(struct vmw_private *dev_priv,
+		      uint32_t seqno)
 {
 	struct vmw_fifo_state *fifo_state;
 	bool ret;
 
-	if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
+	if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
 		return true;
 
 	fifo_state = &dev_priv->fifo;
-	vmw_update_sequence(dev_priv, fifo_state);
-	if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
+	vmw_update_seqno(dev_priv, fifo_state);
+	if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
 		return true;
 
 	if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) &&
-	    vmw_fifo_idle(dev_priv, sequence))
+	    vmw_fifo_idle(dev_priv, seqno))
 		return true;
 
 	/**
-	 * Then check if the sequence is higher than what we've actually
+	 * Then check if the seqno is higher than what we've actually
 	 * emitted. Then the fence is stale and signaled.
 	 */
 
-	ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
+	ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
 	       > VMW_FENCE_WRAP);
 
 	return ret;
@@ -109,7 +109,7 @@ bool vmw_fence_signaled(struct vmw_private *dev_priv,
 int vmw_fallback_wait(struct vmw_private *dev_priv,
 		      bool lazy,
 		      bool fifo_idle,
-		      uint32_t sequence,
+		      uint32_t seqno,
 		      bool interruptible,
 		      unsigned long timeout)
 {
@@ -123,7 +123,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
 	DEFINE_WAIT(__wait);
 
 	wait_condition = (fifo_idle) ? &vmw_fifo_idle :
-			 &vmw_fence_signaled;
+			 &vmw_seqno_passed;
 
 	/**
 	 * Block command submission while waiting for idle.
@@ -131,14 +131,14 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
 
 	if (fifo_idle)
 		down_read(&fifo_state->rwsem);
-	signal_seq = atomic_read(&dev_priv->fence_seq);
+	signal_seq = atomic_read(&dev_priv->marker_seq);
 	ret = 0;
 
 	for (;;) {
 		prepare_to_wait(&dev_priv->fence_queue, &__wait,
 				(interruptible) ?
 				TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
-		if (wait_condition(dev_priv, sequence))
+		if (wait_condition(dev_priv, seqno))
 			break;
 		if (time_after_eq(jiffies, end_jiffies)) {
 			DRM_ERROR("SVGA device lockup.\n");
@@ -175,28 +175,28 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
 	return ret;
 }
 
-int vmw_wait_fence(struct vmw_private *dev_priv,
-		   bool lazy, uint32_t sequence,
+int vmw_wait_seqno(struct vmw_private *dev_priv,
+		   bool lazy, uint32_t seqno,
 		   bool interruptible, unsigned long timeout)
 {
 	long ret;
 	unsigned long irq_flags;
 	struct vmw_fifo_state *fifo = &dev_priv->fifo;
 
-	if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
+	if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
 		return 0;
 
-	if (likely(vmw_fence_signaled(dev_priv, sequence)))
+	if (likely(vmw_seqno_passed(dev_priv, seqno)))
 		return 0;
 
 	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
 
 	if (!(fifo->capabilities & SVGA_FIFO_CAP_FENCE))
-		return vmw_fallback_wait(dev_priv, lazy, true, sequence,
+		return vmw_fallback_wait(dev_priv, lazy, true, seqno,
 					 interruptible, timeout);
 
 	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
-		return vmw_fallback_wait(dev_priv, lazy, false, sequence,
+		return vmw_fallback_wait(dev_priv, lazy, false, seqno,
 					 interruptible, timeout);
 
 	mutex_lock(&dev_priv->hw_mutex);
@@ -214,12 +214,12 @@ int vmw_wait_fence(struct vmw_private *dev_priv,
 	if (interruptible)
 		ret = wait_event_interruptible_timeout
 			(dev_priv->fence_queue,
-			 vmw_fence_signaled(dev_priv, sequence),
+			 vmw_seqno_passed(dev_priv, seqno),
 			 timeout);
 	else
 		ret = wait_event_timeout
 			(dev_priv->fence_queue,
-			 vmw_fence_signaled(dev_priv, sequence),
+			 vmw_seqno_passed(dev_priv, seqno),
 			 timeout);
 
 	if (unlikely(ret == 0))
@@ -293,5 +293,5 @@ int vmw_fence_wait_ioctl(struct drm_device *dev, void *data,
 		return -EBUSY;
 
 	timeout = (unsigned long)arg->kernel_cookie - timeout;
-	return vmw_wait_fence(vmw_priv(dev), true, arg->sequence, true, timeout);
+	return vmw_wait_seqno(vmw_priv(dev), true, arg->seqno, true, timeout);
 }
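vmw_seqno_passed() above uses the classic unsigned-subtraction test, which stays correct across 32-bit wraparound as long as the two values lie within the window. A stand-alone sketch (the window value here is illustrative, not necessarily the driver's VMW_FENCE_WRAP):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_FENCE_WRAP (1u << 24) /* illustrative window */

static bool seqno_passed(uint32_t last_read_seqno, uint32_t seqno)
{
	/* Unsigned subtraction wraps, so this also holds when
	 * last_read_seqno has wrapped past 0 but seqno has not. */
	return last_read_seqno - seqno < DEMO_FENCE_WRAP;
}

int main(void)
{
	printf("%d\n", seqno_passed(5, UINT32_MAX - 2)); /* 1: passed */
	printf("%d\n", seqno_passed(5, 100));            /* 0: not yet */
	return 0;
}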
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
index 61eacc1b5ca3..8a8725c2716c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
@@ -28,13 +28,13 @@
 
 #include "vmwgfx_drv.h"
 
-struct vmw_fence {
+struct vmw_marker {
 	struct list_head head;
-	uint32_t sequence;
+	uint32_t seqno;
 	struct timespec submitted;
 };
 
-void vmw_fence_queue_init(struct vmw_fence_queue *queue)
+void vmw_marker_queue_init(struct vmw_marker_queue *queue)
 {
 	INIT_LIST_HEAD(&queue->head);
 	queue->lag = ns_to_timespec(0);
@@ -42,38 +42,38 @@ void vmw_fence_queue_init(struct vmw_fence_queue *queue)
 	spin_lock_init(&queue->lock);
 }
 
-void vmw_fence_queue_takedown(struct vmw_fence_queue *queue)
+void vmw_marker_queue_takedown(struct vmw_marker_queue *queue)
 {
-	struct vmw_fence *fence, *next;
+	struct vmw_marker *marker, *next;
 
 	spin_lock(&queue->lock);
-	list_for_each_entry_safe(fence, next, &queue->head, head) {
-		kfree(fence);
+	list_for_each_entry_safe(marker, next, &queue->head, head) {
+		kfree(marker);
 	}
 	spin_unlock(&queue->lock);
 }
 
-int vmw_fence_push(struct vmw_fence_queue *queue,
-		   uint32_t sequence)
+int vmw_marker_push(struct vmw_marker_queue *queue,
+		    uint32_t seqno)
 {
-	struct vmw_fence *fence = kmalloc(sizeof(*fence), GFP_KERNEL);
+	struct vmw_marker *marker = kmalloc(sizeof(*marker), GFP_KERNEL);
 
-	if (unlikely(!fence))
+	if (unlikely(!marker))
 		return -ENOMEM;
 
-	fence->sequence = sequence;
-	getrawmonotonic(&fence->submitted);
+	marker->seqno = seqno;
+	getrawmonotonic(&marker->submitted);
 	spin_lock(&queue->lock);
-	list_add_tail(&fence->head, &queue->head);
+	list_add_tail(&marker->head, &queue->head);
 	spin_unlock(&queue->lock);
 
 	return 0;
 }
 
-int vmw_fence_pull(struct vmw_fence_queue *queue,
-		   uint32_t signaled_sequence)
+int vmw_marker_pull(struct vmw_marker_queue *queue,
+		    uint32_t signaled_seqno)
 {
-	struct vmw_fence *fence, *next;
+	struct vmw_marker *marker, *next;
 	struct timespec now;
 	bool updated = false;
 
@@ -87,15 +87,15 @@ int vmw_fence_pull(struct vmw_fence_queue *queue,
 		goto out_unlock;
 	}
 
-	list_for_each_entry_safe(fence, next, &queue->head, head) {
-		if (signaled_sequence - fence->sequence > (1 << 30))
+	list_for_each_entry_safe(marker, next, &queue->head, head) {
+		if (signaled_seqno - marker->seqno > (1 << 30))
 			continue;
 
-		queue->lag = timespec_sub(now, fence->submitted);
+		queue->lag = timespec_sub(now, marker->submitted);
 		queue->lag_time = now;
 		updated = true;
-		list_del(&fence->head);
-		kfree(fence);
+		list_del(&marker->head);
+		kfree(marker);
 	}
 
 out_unlock:
@@ -117,7 +117,7 @@ static struct timespec vmw_timespec_add(struct timespec t1,
 	return t1;
 }
 
-static struct timespec vmw_fifo_lag(struct vmw_fence_queue *queue)
+static struct timespec vmw_fifo_lag(struct vmw_marker_queue *queue)
 {
 	struct timespec now;
 
@@ -131,7 +131,7 @@ static struct timespec vmw_fifo_lag(struct vmw_fence_queue *queue)
 }
 
 
-static bool vmw_lag_lt(struct vmw_fence_queue *queue,
+static bool vmw_lag_lt(struct vmw_marker_queue *queue,
 		       uint32_t us)
 {
 	struct timespec lag, cond;
@@ -142,32 +142,30 @@ static bool vmw_lag_lt(struct vmw_fence_queue *queue,
 }
 
 int vmw_wait_lag(struct vmw_private *dev_priv,
-		 struct vmw_fence_queue *queue, uint32_t us)
+		 struct vmw_marker_queue *queue, uint32_t us)
 {
-	struct vmw_fence *fence;
-	uint32_t sequence;
+	struct vmw_marker *marker;
+	uint32_t seqno;
 	int ret;
 
 	while (!vmw_lag_lt(queue, us)) {
 		spin_lock(&queue->lock);
 		if (list_empty(&queue->head))
-			sequence = atomic_read(&dev_priv->fence_seq);
+			seqno = atomic_read(&dev_priv->marker_seq);
 		else {
-			fence = list_first_entry(&queue->head,
-						 struct vmw_fence, head);
-			sequence = fence->sequence;
+			marker = list_first_entry(&queue->head,
+						  struct vmw_marker, head);
+			seqno = marker->seqno;
 		}
 		spin_unlock(&queue->lock);
 
-		ret = vmw_wait_fence(dev_priv, false, sequence, true,
+		ret = vmw_wait_seqno(dev_priv, false, seqno, true,
 				     3*HZ);
 
 		if (unlikely(ret != 0))
 			return ret;
 
-		(void) vmw_fence_pull(queue, sequence);
+		(void) vmw_marker_pull(queue, seqno);
 	}
 	return 0;
 }
-
-
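vmw_wait_lag() above throttles on how far the fifo lags behind submission: each marker timestamps a submission, and the lag is the age of the oldest unretired marker. A simplified, self-contained sketch of that calculation (hypothetical demo, not driver code):

#include <stdint.h>
#include <stdio.h>

struct demo_marker {
	uint32_t seqno;
	uint64_t submitted_us; /* fifo submission time */
};

/* Lag is how long ago the oldest unretired submission was made. */
static uint64_t fifo_lag_us(const struct demo_marker *oldest, uint64_t now_us)
{
	return oldest ? now_us - oldest->submitted_us : 0;
}

int main(void)
{
	struct demo_marker oldest = { .seqno = 42, .submitted_us = 1000 };
	uint64_t now_us = 21000;    /* 20 ms after submission */
	uint32_t budget_us = 10000; /* caller's throttle limit */

	if (fifo_lag_us(&oldest, now_us) > budget_us)
		printf("would wait on seqno %u before submitting more\n",
		       oldest.seqno);
	return 0;
}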
diff --git a/include/drm/vmwgfx_drm.h b/include/drm/vmwgfx_drm.h
index 467b80c7485d..c2b3909ac50a 100644
--- a/include/drm/vmwgfx_drm.h
+++ b/include/drm/vmwgfx_drm.h
@@ -289,7 +289,7 @@ union drm_vmw_surface_reference_arg {
  * DRM_VMW_EXECBUF
  *
  * Submit a command buffer for execution on the host, and return a
- * fence sequence that when signaled, indicates that the command buffer has
+ * fence seqno that when signaled, indicates that the command buffer has
  * executed.
  */
 
@@ -325,7 +325,7 @@ struct drm_vmw_execbuf_arg {
 /**
  * struct drm_vmw_fence_rep
  *
- * @fence_seq: Fence sequence associated with a command submission.
+ * @fence_seq: Fence seqno associated with a command submission.
  * @error: This member should've been set to -EFAULT on submission.
  * The following actions should be take on completion:
  * error == -EFAULT: Fence communication failed. The host is synchronized.
@@ -432,7 +432,7 @@ struct drm_vmw_unref_dmabuf_arg {
 
 
 struct drm_vmw_fence_wait_arg {
-	uint64_t sequence;
+	uint64_t seqno;
 	uint64_t kernel_cookie;
 	int32_t cookie_valid;
 	int32_t pad64;