about | summary | refs | log | tree | commit | diff | stats
path: root/drivers/gpu/drm/i915/intel_ringbuffer.c
diff options
context:
space:
mode:
author    Chris Wilson <chris@chris-wilson.co.uk>  2010-12-13 11:54:50 -0500
committer Chris Wilson <chris@chris-wilson.co.uk>  2010-12-14 06:34:46 -0500
commit    b13c2b96bf15b9dd0f1a45fd788f3a3025c5aec6 (patch)
tree      6eb68ba8f5e4ec977efc3e087435ef272534915c /drivers/gpu/drm/i915/intel_ringbuffer.c
parent    8d5203ca62539c6ab36a5bc2402c2de1de460e30 (diff)
drm/i915/ringbuffer: Make IRQ refcnting atomic
In order to enforce the correct memory barriers for irq get/put, we need to perform the actual counting using atomic operations.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 44
1 file changed, 25 insertions(+), 19 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 74b99718a1fb..a3fd993e0de0 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -327,25 +327,28 @@ ring_get_seqno(struct intel_ring_buffer *ring)
327 return intel_read_status_page(ring, I915_GEM_HWS_INDEX); 327 return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
328} 328}
329 329
330static void 330static bool
331render_ring_get_irq(struct intel_ring_buffer *ring) 331render_ring_get_irq(struct intel_ring_buffer *ring)
332{ 332{
333 struct drm_device *dev = ring->dev; 333 struct drm_device *dev = ring->dev;
334 334
335 if (dev->irq_enabled && ++ring->irq_refcount == 1) { 335 if (!dev->irq_enabled)
336 return false;
337
338 if (atomic_inc_return(&ring->irq_refcount) == 1) {
336 drm_i915_private_t *dev_priv = dev->dev_private; 339 drm_i915_private_t *dev_priv = dev->dev_private;
337 unsigned long irqflags; 340 unsigned long irqflags;
338 341
339 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 342 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
340
341 if (HAS_PCH_SPLIT(dev)) 343 if (HAS_PCH_SPLIT(dev))
342 ironlake_enable_graphics_irq(dev_priv, 344 ironlake_enable_graphics_irq(dev_priv,
343 GT_USER_INTERRUPT); 345 GT_USER_INTERRUPT);
344 else 346 else
345 i915_enable_irq(dev_priv, I915_USER_INTERRUPT); 347 i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
346
347 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 348 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
348 } 349 }
350
351 return true;
349} 352}
350 353
351static void 354static void
@@ -353,8 +356,7 @@ render_ring_put_irq(struct intel_ring_buffer *ring)
353{ 356{
354 struct drm_device *dev = ring->dev; 357 struct drm_device *dev = ring->dev;
355 358
356 BUG_ON(dev->irq_enabled && ring->irq_refcount == 0); 359 if (atomic_dec_and_test(&ring->irq_refcount)) {
357 if (dev->irq_enabled && --ring->irq_refcount == 0) {
358 drm_i915_private_t *dev_priv = dev->dev_private; 360 drm_i915_private_t *dev_priv = dev->dev_private;
359 unsigned long irqflags; 361 unsigned long irqflags;
360 362
@@ -417,12 +419,15 @@ ring_add_request(struct intel_ring_buffer *ring,
417 return 0; 419 return 0;
418} 420}
419 421
420static void 422static bool
421ring_get_irq(struct intel_ring_buffer *ring, u32 flag) 423ring_get_irq(struct intel_ring_buffer *ring, u32 flag)
422{ 424{
423 struct drm_device *dev = ring->dev; 425 struct drm_device *dev = ring->dev;
424 426
425 if (dev->irq_enabled && ++ring->irq_refcount == 1) { 427 if (!dev->irq_enabled)
428 return false;
429
430 if (atomic_inc_return(&ring->irq_refcount) == 1) {
426 drm_i915_private_t *dev_priv = dev->dev_private; 431 drm_i915_private_t *dev_priv = dev->dev_private;
427 unsigned long irqflags; 432 unsigned long irqflags;
428 433
@@ -430,6 +435,8 @@ ring_get_irq(struct intel_ring_buffer *ring, u32 flag)
430 ironlake_enable_graphics_irq(dev_priv, flag); 435 ironlake_enable_graphics_irq(dev_priv, flag);
431 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 436 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
432 } 437 }
438
439 return true;
433} 440}
434 441
435static void 442static void
@@ -437,7 +444,7 @@ ring_put_irq(struct intel_ring_buffer *ring, u32 flag)
437{ 444{
438 struct drm_device *dev = ring->dev; 445 struct drm_device *dev = ring->dev;
439 446
440 if (dev->irq_enabled && --ring->irq_refcount == 0) { 447 if (atomic_dec_and_test(&ring->irq_refcount)) {
441 drm_i915_private_t *dev_priv = dev->dev_private; 448 drm_i915_private_t *dev_priv = dev->dev_private;
442 unsigned long irqflags; 449 unsigned long irqflags;
443 450
@@ -447,16 +454,15 @@ ring_put_irq(struct intel_ring_buffer *ring, u32 flag)
447 } 454 }
448} 455}
449 456
450 457static bool
451static void
452bsd_ring_get_irq(struct intel_ring_buffer *ring) 458bsd_ring_get_irq(struct intel_ring_buffer *ring)
453{ 459{
454 ring_get_irq(ring, GT_BSD_USER_INTERRUPT); 460 return ring_get_irq(ring, GT_BSD_USER_INTERRUPT);
455} 461}
456static void 462static void
457bsd_ring_put_irq(struct intel_ring_buffer *ring) 463bsd_ring_put_irq(struct intel_ring_buffer *ring)
458{ 464{
459 ring_put_irq(ring, GT_BSD_USER_INTERRUPT); 465 ring_put_irq(ring, GT_BSD_USER_INTERRUPT);
460} 466}
461 467
462static int 468static int
@@ -846,16 +852,16 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
846 return 0; 852 return 0;
847} 853}
848 854
849static void 855static bool
850gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring) 856gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
851{ 857{
852 ring_get_irq(ring, GT_GEN6_BSD_USER_INTERRUPT); 858 return ring_get_irq(ring, GT_GEN6_BSD_USER_INTERRUPT);
853} 859}
854 860
855static void 861static void
856gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring) 862gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
857{ 863{
858 ring_put_irq(ring, GT_GEN6_BSD_USER_INTERRUPT); 864 ring_put_irq(ring, GT_GEN6_BSD_USER_INTERRUPT);
859} 865}
860 866
861/* ring buffer for Video Codec for Gen6+ */ 867/* ring buffer for Video Codec for Gen6+ */
@@ -876,16 +882,16 @@ static const struct intel_ring_buffer gen6_bsd_ring = {
876 882
877/* Blitter support (SandyBridge+) */ 883/* Blitter support (SandyBridge+) */
878 884
879static void 885static bool
880blt_ring_get_irq(struct intel_ring_buffer *ring) 886blt_ring_get_irq(struct intel_ring_buffer *ring)
881{ 887{
882 ring_get_irq(ring, GT_BLT_USER_INTERRUPT); 888 return ring_get_irq(ring, GT_BLT_USER_INTERRUPT);
883} 889}
884 890
885static void 891static void
886blt_ring_put_irq(struct intel_ring_buffer *ring) 892blt_ring_put_irq(struct intel_ring_buffer *ring)
887{ 893{
888 ring_put_irq(ring, GT_BLT_USER_INTERRUPT); 894 ring_put_irq(ring, GT_BLT_USER_INTERRUPT);
889} 895}
890 896
891 897