author	Thomas Hellstrom <thellstrom@vmware.com>	2010-02-08 04:57:25 -0500
committer	Dave Airlie <airlied@redhat.com>	2010-02-10 21:06:05 -0500
commit	85b9e4878f3b16993fba871c0c68d0948ec9c7c6 (patch)
tree	44e6ad34d467341be0cfea6605ed50c2e92b77a6 /drivers/gpu
parent	a87897edbae2d60db7bcb6bb0a75e82013d68305 (diff)
drm/vmwgfx: Fix a circular locking dependency bug.
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'drivers/gpu')
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_drv.h	3
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c	17
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_irq.c	13
3 files changed, 14 insertions(+), 19 deletions(-)
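
In short, the hunks below introduce a fifo_mutex held across the whole reserve/commit pair, shrink the rwsem write-hold in vmw_fifo_commit() to just the NEXT_CMD/RESERVED update, and turn fence_seq into an atomic_t so vmw_fence_signaled() can read it without taking the rwsem. The following is only a minimal sketch of that locking order; the sketch_* names and empty bodies are hypothetical stand-ins, not the driver's code.

#include <linux/mutex.h>
#include <linux/rwsem.h>

/* Hypothetical, stripped-down stand-in for vmw_fifo_state. */
struct sketch_fifo_state {
	struct mutex fifo_mutex;	/* outer: serializes reserve()..commit() */
	struct rw_semaphore rwsem;	/* inner: only around the NEXT_CMD/RESERVED update */
};

static void *sketch_fifo_reserve(struct sketch_fifo_state *fifo)
{
	/* Before the patch this was down_write(&fifo->rwsem), held until
	 * commit time; now the outer mutex covers the reservation. */
	mutex_lock(&fifo->fifo_mutex);
	/* ... read MIN/MAX/NEXT_CMD and find space for the command ... */
	return NULL;	/* placeholder; the real code returns a mapping */
}

static void sketch_fifo_commit(struct sketch_fifo_state *fifo)
{
	down_write(&fifo->rwsem);	/* short critical section only */
	/* ... advance NEXT_CMD and clear SVGA_FIFO_RESERVED ... */
	up_write(&fifo->rwsem);
	/* Ping the host outside the rwsem, then drop the outer mutex. */
	mutex_unlock(&fifo->fifo_mutex);
}
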
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 3e4e670d321..356dc935ec1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -113,6 +113,7 @@ struct vmw_fifo_state {
 	unsigned long static_buffer_size;
 	bool using_bounce_buffer;
 	uint32_t capabilities;
+	struct mutex fifo_mutex;
 	struct rw_semaphore rwsem;
 };
 
@@ -213,7 +214,7 @@ struct vmw_private {
 	 * Fencing and IRQs.
 	 */
 
-	uint32_t fence_seq;
+	atomic_t fence_seq;
 	wait_queue_head_t fence_queue;
 	wait_queue_head_t fifo_queue;
 	atomic_t fence_queue_waiters;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index 4157547cc6e..39d43a01d84 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -74,6 +74,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 	fifo->reserved_size = 0;
 	fifo->using_bounce_buffer = false;
 
+	mutex_init(&fifo->fifo_mutex);
 	init_rwsem(&fifo->rwsem);
 
 	/*
@@ -117,7 +118,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 		 (unsigned int) min,
 		 (unsigned int) fifo->capabilities);
 
-	dev_priv->fence_seq = dev_priv->last_read_sequence;
+	atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
 	iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
 
 	return vmw_fifo_send_fence(dev_priv, &dummy);
@@ -283,7 +284,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
 	uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
 	int ret;
 
-	down_write(&fifo_state->rwsem);
+	mutex_lock(&fifo_state->fifo_mutex);
 	max = ioread32(fifo_mem + SVGA_FIFO_MAX);
 	min = ioread32(fifo_mem + SVGA_FIFO_MIN);
 	next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
@@ -351,7 +352,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
 	}
 out_err:
 	fifo_state->reserved_size = 0;
-	up_write(&fifo_state->rwsem);
+	mutex_unlock(&fifo_state->fifo_mutex);
 	return NULL;
 }
 
@@ -426,6 +427,7 @@ void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
 
 	}
 
+	down_write(&fifo_state->rwsem);
 	if (fifo_state->using_bounce_buffer || reserveable) {
 		next_cmd += bytes;
 		if (next_cmd >= max)
@@ -437,8 +439,9 @@ void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
 	if (reserveable)
 		iowrite32(0, fifo_mem + SVGA_FIFO_RESERVED);
 	mb();
-	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
 	up_write(&fifo_state->rwsem);
+	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
+	mutex_unlock(&fifo_state->fifo_mutex);
 }
 
 int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
@@ -451,9 +454,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
 
 	fm = vmw_fifo_reserve(dev_priv, bytes);
 	if (unlikely(fm == NULL)) {
-		down_write(&fifo_state->rwsem);
-		*sequence = dev_priv->fence_seq;
-		up_write(&fifo_state->rwsem);
+		*sequence = atomic_read(&dev_priv->fence_seq);
 		ret = -ENOMEM;
 		(void)vmw_fallback_wait(dev_priv, false, true, *sequence,
 					false, 3*HZ);
@@ -461,7 +462,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
 	}
 
 	do {
-		*sequence = dev_priv->fence_seq++;
+		*sequence = atomic_add_return(1, &dev_priv->fence_seq);
 	} while (*sequence == 0);
 
 	if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index d40086fc864..4d7cb539386 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -85,19 +85,12 @@ bool vmw_fence_signaled(struct vmw_private *dev_priv,
 		return true;
 
 	/**
-	 * Below is to signal stale fences that have wrapped.
-	 * First, block fence submission.
-	 */
-
-	down_read(&fifo_state->rwsem);
-
-	/**
 	 * Then check if the sequence is higher than what we've actually
 	 * emitted. Then the fence is stale and signaled.
 	 */
 
-	ret = ((dev_priv->fence_seq - sequence) > VMW_FENCE_WRAP);
-	up_read(&fifo_state->rwsem);
+	ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
+	       > VMW_FENCE_WRAP);
 
 	return ret;
 }
@@ -127,7 +120,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
 
 	if (fifo_idle)
 		down_read(&fifo_state->rwsem);
-	signal_seq = dev_priv->fence_seq;
+	signal_seq = atomic_read(&dev_priv->fence_seq);
 	ret = 0;
 
 	for (;;) {
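
A side note on the vmw_fence_signaled() hunk above: with fence_seq an atomic_t, the stale-fence test becomes a single atomic_read() followed by an unsigned-wraparound comparison, so no rwsem is needed to stabilize the counter. A minimal standalone illustration, with a hypothetical SKETCH_FENCE_WRAP threshold standing in for VMW_FENCE_WRAP (not driver code):

#include <stdint.h>
#include <stdio.h>

/* Placeholder threshold; the real driver uses VMW_FENCE_WRAP. */
#define SKETCH_FENCE_WRAP (1u << 24)

/* A fence counts as stale (already signaled) when the emitted sequence
 * counter has moved more than SKETCH_FENCE_WRAP past it, relying on
 * unsigned 32-bit wraparound arithmetic. */
static int sketch_fence_is_stale(uint32_t emitted_seq, uint32_t fence_seq)
{
	return (emitted_seq - fence_seq) > SKETCH_FENCE_WRAP;
}

int main(void)
{
	/* Recently emitted fence: not stale. */
	printf("%d\n", sketch_fence_is_stale(1000, 990));		/* prints 0 */
	/* Fence issued long ago, counter wrapped past it: stale. */
	printf("%d\n", sketch_fence_is_stale(100, 4000000000u));	/* prints 1 */
	return 0;
}
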