diff options
author | Thomas Hellstrom <thellstrom@vmware.com> | 2011-09-01 16:18:43 -0400 |
---|---|---|
committer | Dave Airlie <airlied@redhat.com> | 2011-09-06 06:48:43 -0400 |
commit | 4f73a96bd76914009682432842ac04a32ab9115b (patch) | |
tree | 65264c569d796be5388008b8303738b642fd9c83 | |
parent | 6bcd8d3c782b7b2c98c8f414a6bb43cf6b84e53c (diff) |
vmwgfx: Make vmw_wait_seqno a bit more readable
Break out on-demand enabling and disabling of fence irqs to make
the function more readable. Also make dev_priv->fence_queue_waiters an int
instead of an atomic_t since we only manipulate it with dev_priv->hw_mutex
held.
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Jakob Bornecrantz <jakob@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
-rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 2 | ||||
-rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 2 | ||||
-rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_irq.c | 57 |
3 files changed, 37 insertions(+), 24 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index c8b5a53f140b..4f65f1e34b8f 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | |||
@@ -291,7 +291,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | |||
291 | mutex_init(&dev_priv->init_mutex); | 291 | mutex_init(&dev_priv->init_mutex); |
292 | init_waitqueue_head(&dev_priv->fence_queue); | 292 | init_waitqueue_head(&dev_priv->fence_queue); |
293 | init_waitqueue_head(&dev_priv->fifo_queue); | 293 | init_waitqueue_head(&dev_priv->fifo_queue); |
294 | atomic_set(&dev_priv->fence_queue_waiters, 0); | 294 | dev_priv->fence_queue_waiters = 0; |
295 | atomic_set(&dev_priv->fifo_queue_waiters, 0); | 295 | atomic_set(&dev_priv->fifo_queue_waiters, 0); |
296 | 296 | ||
297 | dev_priv->io_start = pci_resource_start(dev->pdev, 0); | 297 | dev_priv->io_start = pci_resource_start(dev->pdev, 0); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 9c3016b53eac..3018871aaaff 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | |||
@@ -241,7 +241,7 @@ struct vmw_private { | |||
241 | atomic_t marker_seq; | 241 | atomic_t marker_seq; |
242 | wait_queue_head_t fence_queue; | 242 | wait_queue_head_t fence_queue; |
243 | wait_queue_head_t fifo_queue; | 243 | wait_queue_head_t fifo_queue; |
244 | atomic_t fence_queue_waiters; | 244 | int fence_queue_waiters; /* Protected by hw_mutex */ |
245 | atomic_t fifo_queue_waiters; | 245 | atomic_t fifo_queue_waiters; |
246 | uint32_t last_read_seqno; | 246 | uint32_t last_read_seqno; |
247 | spinlock_t irq_lock; | 247 | spinlock_t irq_lock; |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c index 48701d2c8c0f..13dde06b60be 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c | |||
@@ -175,12 +175,43 @@ int vmw_fallback_wait(struct vmw_private *dev_priv, | |||
175 | return ret; | 175 | return ret; |
176 | } | 176 | } |
177 | 177 | ||
178 | static void vmw_seqno_waiter_add(struct vmw_private *dev_priv) | ||
179 | { | ||
180 | mutex_lock(&dev_priv->hw_mutex); | ||
181 | if (dev_priv->fence_queue_waiters++ == 0) { | ||
182 | unsigned long irq_flags; | ||
183 | |||
184 | spin_lock_irqsave(&dev_priv->irq_lock, irq_flags); | ||
185 | outl(SVGA_IRQFLAG_ANY_FENCE, | ||
186 | dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); | ||
187 | vmw_write(dev_priv, SVGA_REG_IRQMASK, | ||
188 | vmw_read(dev_priv, SVGA_REG_IRQMASK) | | ||
189 | SVGA_IRQFLAG_ANY_FENCE); | ||
190 | spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); | ||
191 | } | ||
192 | mutex_unlock(&dev_priv->hw_mutex); | ||
193 | } | ||
194 | |||
195 | static void vmw_seqno_waiter_remove(struct vmw_private *dev_priv) | ||
196 | { | ||
197 | mutex_lock(&dev_priv->hw_mutex); | ||
198 | if (--dev_priv->fence_queue_waiters == 0) { | ||
199 | unsigned long irq_flags; | ||
200 | |||
201 | spin_lock_irqsave(&dev_priv->irq_lock, irq_flags); | ||
202 | vmw_write(dev_priv, SVGA_REG_IRQMASK, | ||
203 | vmw_read(dev_priv, SVGA_REG_IRQMASK) & | ||
204 | ~SVGA_IRQFLAG_ANY_FENCE); | ||
205 | spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); | ||
206 | } | ||
207 | mutex_unlock(&dev_priv->hw_mutex); | ||
208 | } | ||
209 | |||
178 | int vmw_wait_seqno(struct vmw_private *dev_priv, | 210 | int vmw_wait_seqno(struct vmw_private *dev_priv, |
179 | bool lazy, uint32_t seqno, | 211 | bool lazy, uint32_t seqno, |
180 | bool interruptible, unsigned long timeout) | 212 | bool interruptible, unsigned long timeout) |
181 | { | 213 | { |
182 | long ret; | 214 | long ret; |
183 | unsigned long irq_flags; | ||
184 | struct vmw_fifo_state *fifo = &dev_priv->fifo; | 215 | struct vmw_fifo_state *fifo = &dev_priv->fifo; |
185 | 216 | ||
186 | if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP)) | 217 | if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP)) |
@@ -199,17 +230,7 @@ int vmw_wait_seqno(struct vmw_private *dev_priv, | |||
199 | return vmw_fallback_wait(dev_priv, lazy, false, seqno, | 230 | return vmw_fallback_wait(dev_priv, lazy, false, seqno, |
200 | interruptible, timeout); | 231 | interruptible, timeout); |
201 | 232 | ||
202 | mutex_lock(&dev_priv->hw_mutex); | 233 | vmw_seqno_waiter_add(dev_priv); |
203 | if (atomic_add_return(1, &dev_priv->fence_queue_waiters) > 0) { | ||
204 | spin_lock_irqsave(&dev_priv->irq_lock, irq_flags); | ||
205 | outl(SVGA_IRQFLAG_ANY_FENCE, | ||
206 | dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); | ||
207 | vmw_write(dev_priv, SVGA_REG_IRQMASK, | ||
208 | vmw_read(dev_priv, SVGA_REG_IRQMASK) | | ||
209 | SVGA_IRQFLAG_ANY_FENCE); | ||
210 | spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); | ||
211 | } | ||
212 | mutex_unlock(&dev_priv->hw_mutex); | ||
213 | 234 | ||
214 | if (interruptible) | 235 | if (interruptible) |
215 | ret = wait_event_interruptible_timeout | 236 | ret = wait_event_interruptible_timeout |
@@ -222,21 +243,13 @@ int vmw_wait_seqno(struct vmw_private *dev_priv, | |||
222 | vmw_seqno_passed(dev_priv, seqno), | 243 | vmw_seqno_passed(dev_priv, seqno), |
223 | timeout); | 244 | timeout); |
224 | 245 | ||
246 | vmw_seqno_waiter_remove(dev_priv); | ||
247 | |||
225 | if (unlikely(ret == 0)) | 248 | if (unlikely(ret == 0)) |
226 | ret = -EBUSY; | 249 | ret = -EBUSY; |
227 | else if (likely(ret > 0)) | 250 | else if (likely(ret > 0)) |
228 | ret = 0; | 251 | ret = 0; |
229 | 252 | ||
230 | mutex_lock(&dev_priv->hw_mutex); | ||
231 | if (atomic_dec_and_test(&dev_priv->fence_queue_waiters)) { | ||
232 | spin_lock_irqsave(&dev_priv->irq_lock, irq_flags); | ||
233 | vmw_write(dev_priv, SVGA_REG_IRQMASK, | ||
234 | vmw_read(dev_priv, SVGA_REG_IRQMASK) & | ||
235 | ~SVGA_IRQFLAG_ANY_FENCE); | ||
236 | spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags); | ||
237 | } | ||
238 | mutex_unlock(&dev_priv->hw_mutex); | ||
239 | |||
240 | return ret; | 253 | return ret; |
241 | } | 254 | } |
242 | 255 | ||