author      Eric Anholt <eric@anholt.net>     2008-09-01 19:45:29 -0400
committer   Dave Airlie <airlied@linux.ie>    2008-10-17 17:10:51 -0400
commit      546b0974c39657017407c86fe79811100b60700d (patch)
tree        42ae164d23ecaa1cb78ad87ad9603e0bdd29740d /drivers/gpu
parent      ed4c9c4acf948b42b138747fcb8843ecb1a24ce4 (diff)
i915: Use struct_mutex to protect ring in GEM mode.
In the conversion for GEM, we had stopped using the hardware lock to protect
ring usage, since it was all internal to the DRM now. However, some paths
weren't converted to using struct_mutex to prevent multiple threads from
concurrently working on the ring, in particular between the vblank swap handler
and ioctls.
Signed-off-by: Eric Anholt <eric@anholt.net>
Signed-off-by: Dave Airlie <airlied@redhat.com>
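
The pattern the patch applies is uniform: each legacy ioctl that emits to the ring keeps its hardware-lock check only for the non-GEM case (via the new RING_LOCK_TEST_WITH_RETURN) and otherwise serializes on dev->struct_mutex around the dispatch call, while the vblank swap tasklet is rescheduled as a work item so it can sleep on that same mutex. Below is a minimal sketch of the ioctl-side shape only; my_ring_ioctl() and my_dispatch() are hypothetical stand-ins, not functions from this patch.

/*
 * Sketch only: the struct_mutex-around-ring-access pattern used by the
 * ioctl paths in this change. my_ring_ioctl()/my_dispatch() are
 * placeholders for the real i915 entry points.
 */
static int my_ring_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	int ret;

	/* Fall back to the hardware lock only when GEM does not own the ring. */
	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	mutex_lock(&dev->struct_mutex);
	ret = my_dispatch(dev, data);	/* ring emission happens here */
	mutex_unlock(&dev->struct_mutex);

	return ret;
}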
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c  28
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h  15
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c   2
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c  28
4 files changed, 63 insertions, 10 deletions
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index ea85d71cab04..d71c89f8802e 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -588,9 +588,15 @@ static int i915_quiescent(struct drm_device * dev)
 static int i915_flush_ioctl(struct drm_device *dev, void *data,
 			    struct drm_file *file_priv)
 {
-	LOCK_TEST_WITH_RETURN(dev, file_priv);
+	int ret;
+
+	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
 
-	return i915_quiescent(dev);
+	mutex_lock(&dev->struct_mutex);
+	ret = i915_quiescent(dev);
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
 }
 
 static int i915_batchbuffer(struct drm_device *dev, void *data,
@@ -611,14 +617,16 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
 	DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n",
 		  batch->start, batch->used, batch->num_cliprects);
 
-	LOCK_TEST_WITH_RETURN(dev, file_priv);
+	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
 
 	if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects,
 							batch->num_cliprects *
 							sizeof(struct drm_clip_rect)))
 		return -EFAULT;
 
+	mutex_lock(&dev->struct_mutex);
 	ret = i915_dispatch_batchbuffer(dev, batch);
+	mutex_unlock(&dev->struct_mutex);
 
 	sarea_priv->last_dispatch = (int)hw_status[5];
 	return ret;
@@ -637,7 +645,7 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
 	DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
 		  cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
 
-	LOCK_TEST_WITH_RETURN(dev, file_priv);
+	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
 
 	if (cmdbuf->num_cliprects &&
 	    DRM_VERIFYAREA_READ(cmdbuf->cliprects,
@@ -647,7 +655,9 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
 		return -EFAULT;
 	}
 
+	mutex_lock(&dev->struct_mutex);
 	ret = i915_dispatch_cmdbuffer(dev, cmdbuf);
+	mutex_unlock(&dev->struct_mutex);
 	if (ret) {
 		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
 		return ret;
@@ -660,11 +670,17 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
 static int i915_flip_bufs(struct drm_device *dev, void *data,
 			  struct drm_file *file_priv)
 {
+	int ret;
+
 	DRM_DEBUG("%s\n", __func__);
 
-	LOCK_TEST_WITH_RETURN(dev, file_priv);
+	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
 
-	return i915_dispatch_flip(dev);
+	mutex_lock(&dev->struct_mutex);
+	ret = i915_dispatch_flip(dev);
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
 }
 
 static int i915_getparam(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 87b071ab8647..8547f0aeafc6 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -285,6 +285,9 @@ typedef struct drm_i915_private {
 		 */
 		struct delayed_work retire_work;
 
+		/** Work task for vblank-related ring access */
+		struct work_struct vblank_work;
+
 		uint32_t next_gem_seqno;
 
 		/**
@@ -435,6 +438,7 @@ extern int i915_irq_wait(struct drm_device *dev, void *data,
 void i915_user_irq_get(struct drm_device *dev);
 void i915_user_irq_put(struct drm_device *dev);
 
+extern void i915_gem_vblank_work_handler(struct work_struct *work);
 extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
 extern void i915_driver_irq_preinstall(struct drm_device * dev);
 extern int i915_driver_irq_postinstall(struct drm_device *dev);
@@ -538,6 +542,17 @@ extern void intel_opregion_free(struct drm_device *dev);
 extern void opregion_asle_intr(struct drm_device *dev);
 extern void opregion_enable_asle(struct drm_device *dev);
 
+/**
+ * Lock test for when it's just for synchronization of ring access.
+ *
+ * In that case, we don't need to do it when GEM is initialized as nobody else
+ * has access to the ring.
+ */
+#define RING_LOCK_TEST_WITH_RETURN(dev, file_priv) do { \
+	if (((drm_i915_private_t *)dev->dev_private)->ring.ring_obj == NULL) \
+		LOCK_TEST_WITH_RETURN(dev, file_priv); \
+} while (0)
+
 #define I915_READ(reg) DRM_READ32(dev_priv->mmio_map, (reg))
 #define I915_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio_map, (reg), (val))
 #define I915_READ16(reg) DRM_READ16(dev_priv->mmio_map, (reg))
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 90ae8a0369f7..bb6e5a37efa2 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2491,6 +2491,8 @@ i915_gem_load(struct drm_device *dev)
 	INIT_LIST_HEAD(&dev_priv->mm.request_list);
 	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
 			  i915_gem_retire_work_handler);
+	INIT_WORK(&dev_priv->mm.vblank_work,
+		  i915_gem_vblank_work_handler);
 	dev_priv->mm.next_gem_seqno = 1;
 
 	i915_gem_detect_bit_6_swizzle(dev);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index f295bdf16e2d..d04c526410a9 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -349,6 +349,21 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int plane)
 	return count;
 }
 
+void
+i915_gem_vblank_work_handler(struct work_struct *work)
+{
+	drm_i915_private_t *dev_priv;
+	struct drm_device *dev;
+
+	dev_priv = container_of(work, drm_i915_private_t,
+				mm.vblank_work);
+	dev = dev_priv->dev;
+
+	mutex_lock(&dev->struct_mutex);
+	i915_vblank_tasklet(dev);
+	mutex_unlock(&dev->struct_mutex);
+}
+
 irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 {
 	struct drm_device *dev = (struct drm_device *) arg;
@@ -422,8 +437,12 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 	if (iir & I915_ASLE_INTERRUPT)
 		opregion_asle_intr(dev);
 
-	if (vblank && dev_priv->swaps_pending > 0)
-		drm_locked_tasklet(dev, i915_vblank_tasklet);
+	if (vblank && dev_priv->swaps_pending > 0) {
+		if (dev_priv->ring.ring_obj == NULL)
+			drm_locked_tasklet(dev, i915_vblank_tasklet);
+		else
+			schedule_work(&dev_priv->mm.vblank_work);
+	}
 
 	return IRQ_HANDLED;
 }
@@ -514,14 +533,15 @@ int i915_irq_emit(struct drm_device *dev, void *data,
 	drm_i915_irq_emit_t *emit = data;
 	int result;
 
-	LOCK_TEST_WITH_RETURN(dev, file_priv);
+	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
 
 	if (!dev_priv) {
 		DRM_ERROR("called with no initialization\n");
 		return -EINVAL;
 	}
-
+	mutex_lock(&dev->struct_mutex);
 	result = i915_emit_irq(dev);
+	mutex_unlock(&dev->struct_mutex);
 
 	if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
 		DRM_ERROR("copy_to_user\n");