Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c')
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c  26
1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index aae01b9ae4dc..3ba9cac579e0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -127,9 +127,9 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 		 (unsigned int) min,
 		 (unsigned int) fifo->capabilities);
 
-	atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
-	iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
-	vmw_fence_queue_init(&fifo->fence_queue);
+	atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
+	iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
+	vmw_marker_queue_init(&fifo->marker_queue);
 	return vmw_fifo_send_fence(dev_priv, &dummy);
 }
 
@@ -156,7 +156,7 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 	while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
 		vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
 
-	dev_priv->last_read_sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+	dev_priv->last_read_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
 
 	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE,
 		  dev_priv->config_done_state);
@@ -166,7 +166,7 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 		  dev_priv->traces_state);
 
 	mutex_unlock(&dev_priv->hw_mutex);
-	vmw_fence_queue_takedown(&fifo->fence_queue);
+	vmw_marker_queue_takedown(&fifo->marker_queue);
 
 	if (likely(fifo->static_buffer != NULL)) {
 		vfree(fifo->static_buffer);
@@ -447,7 +447,7 @@ void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
 	mutex_unlock(&fifo_state->fifo_mutex);
 }
 
-int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
+int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
 {
 	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
 	struct svga_fifo_cmd_fence *cmd_fence;
@@ -457,16 +457,16 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
 
 	fm = vmw_fifo_reserve(dev_priv, bytes);
 	if (unlikely(fm == NULL)) {
-		*sequence = atomic_read(&dev_priv->fence_seq);
+		*seqno = atomic_read(&dev_priv->marker_seq);
 		ret = -ENOMEM;
-		(void)vmw_fallback_wait(dev_priv, false, true, *sequence,
+		(void)vmw_fallback_wait(dev_priv, false, true, *seqno,
 					false, 3*HZ);
 		goto out_err;
 	}
 
 	do {
-		*sequence = atomic_add_return(1, &dev_priv->fence_seq);
-	} while (*sequence == 0);
+		*seqno = atomic_add_return(1, &dev_priv->marker_seq);
+	} while (*seqno == 0);
 
 	if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
 
@@ -483,10 +483,10 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
 	cmd_fence = (struct svga_fifo_cmd_fence *)
 		((unsigned long)fm + sizeof(__le32));
 
-	iowrite32(*sequence, &cmd_fence->fence);
+	iowrite32(*seqno, &cmd_fence->fence);
 	vmw_fifo_commit(dev_priv, bytes);
-	(void) vmw_fence_push(&fifo_state->fence_queue, *sequence);
-	vmw_update_sequence(dev_priv, fifo_state);
+	(void) vmw_marker_push(&fifo_state->marker_queue, *seqno);
+	vmw_update_seqno(dev_priv, fifo_state);
 
 out_err:
 	return ret;