Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r-- | drivers/gpu/drm/i915/intel_ringbuffer.c | 88
1 files changed, 53 insertions, 35 deletions
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index e9e6f71418a4..95c4b1429935 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -236,7 +236,7 @@ init_pipe_control(struct intel_ring_buffer *ring)
 		ret = -ENOMEM;
 		goto err;
 	}
-	obj->agp_type = AGP_USER_CACHED_MEMORY;
+	obj->cache_level = I915_CACHE_LLC;
 
 	ret = i915_gem_object_pin(obj, 4096, true);
 	if (ret)
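For context, obj->cache_level replaces the old per-object AGP memory-type flag with a GPU cache-level value. A rough sketch of the kind of enum this implies (the exact enumerator list is an assumption, not shown in this diff):

enum i915_cache_level {
	I915_CACHE_NONE = 0,	/* uncached, not snooped */
	I915_CACHE_LLC,		/* snooped by the CPU's last-level cache */
};

Marking the pipe-control page I915_CACHE_LLC is roughly equivalent to the old AGP_USER_CACHED_MEMORY setting: the page stays cacheable/snooped.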
@@ -286,7 +286,7 @@ static int init_render_ring(struct intel_ring_buffer *ring)
 
 	if (INTEL_INFO(dev)->gen > 3) {
 		int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
-		if (IS_GEN6(dev))
+		if (IS_GEN6(dev) || IS_GEN7(dev))
 			mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
 		I915_WRITE(MI_MODE, mode);
 	}
@@ -551,10 +551,31 @@ render_ring_put_irq(struct intel_ring_buffer *ring)
 
 void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
 {
+	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = ring->dev->dev_private;
-	u32 mmio = IS_GEN6(ring->dev) ?
-		RING_HWS_PGA_GEN6(ring->mmio_base) :
-		RING_HWS_PGA(ring->mmio_base);
+	u32 mmio = 0;
+
+	/* The ring status page addresses are no longer next to the rest of
+	 * the ring registers as of gen7.
+	 */
+	if (IS_GEN7(dev)) {
+		switch (ring->id) {
+		case RING_RENDER:
+			mmio = RENDER_HWS_PGA_GEN7;
+			break;
+		case RING_BLT:
+			mmio = BLT_HWS_PGA_GEN7;
+			break;
+		case RING_BSD:
+			mmio = BSD_HWS_PGA_GEN7;
+			break;
+		}
+	} else if (IS_GEN6(ring->dev)) {
+		mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
+	} else {
+		mmio = RING_HWS_PGA(ring->mmio_base);
+	}
+
 	I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
 	POSTING_READ(mmio);
 }
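For context, intel_ring_setup_status_page() only selects the right HWS_PGA register and writes the status page's GTT address into it; the page itself is allocated and mapped elsewhere (init_status_page(), further down). A rough sketch of the usual call sequence around it, approximated rather than quoted from this diff:

	ring->status_page.gfx_addr = obj->gtt_offset;
	ring->status_page.page_addr = kmap(obj->pages[0]);
	memset(ring->status_page.page_addr, 0, PAGE_SIZE);

	intel_ring_setup_status_page(ring);

The POSTING_READ(mmio) afterwards reads the register back so the posted MMIO write is flushed before the ring is used.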
@@ -600,7 +621,7 @@ ring_add_request(struct intel_ring_buffer *ring,
 }
 
 static bool
-ring_get_irq(struct intel_ring_buffer *ring, u32 flag)
+gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
 {
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
@@ -609,71 +630,67 @@ ring_get_irq(struct intel_ring_buffer *ring, u32 flag)
 		return false;
 
 	spin_lock(&ring->irq_lock);
-	if (ring->irq_refcount++ == 0)
-		ironlake_enable_irq(dev_priv, flag);
+	if (ring->irq_refcount++ == 0) {
+		ring->irq_mask &= ~rflag;
+		I915_WRITE_IMR(ring, ring->irq_mask);
+		ironlake_enable_irq(dev_priv, gflag);
+	}
 	spin_unlock(&ring->irq_lock);
 
 	return true;
 }
 
 static void
-ring_put_irq(struct intel_ring_buffer *ring, u32 flag)
+gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
 {
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
 	spin_lock(&ring->irq_lock);
-	if (--ring->irq_refcount == 0)
-		ironlake_disable_irq(dev_priv, flag);
+	if (--ring->irq_refcount == 0) {
+		ring->irq_mask |= rflag;
+		I915_WRITE_IMR(ring, ring->irq_mask);
+		ironlake_disable_irq(dev_priv, gflag);
+	}
 	spin_unlock(&ring->irq_lock);
 }
 
 static bool
-gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
+bsd_ring_get_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
 	if (!dev->irq_enabled)
 		return false;
 
 	spin_lock(&ring->irq_lock);
 	if (ring->irq_refcount++ == 0) {
-		ring->irq_mask &= ~rflag;
-		I915_WRITE_IMR(ring, ring->irq_mask);
-		ironlake_enable_irq(dev_priv, gflag);
+		if (IS_G4X(dev))
+			i915_enable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
+		else
+			ironlake_enable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
 	}
 	spin_unlock(&ring->irq_lock);
 
 	return true;
 }
-
 static void
-gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
+bsd_ring_put_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
 	spin_lock(&ring->irq_lock);
 	if (--ring->irq_refcount == 0) {
-		ring->irq_mask |= rflag;
-		I915_WRITE_IMR(ring, ring->irq_mask);
-		ironlake_disable_irq(dev_priv, gflag);
+		if (IS_G4X(dev))
+			i915_disable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
+		else
+			ironlake_disable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
 	}
 	spin_unlock(&ring->irq_lock);
 }
 
-static bool
-bsd_ring_get_irq(struct intel_ring_buffer *ring)
-{
-	return ring_get_irq(ring, GT_BSD_USER_INTERRUPT);
-}
-static void
-bsd_ring_put_irq(struct intel_ring_buffer *ring)
-{
-	ring_put_irq(ring, GT_BSD_USER_INTERRUPT);
-}
-
 static int
 ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
 {
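The refactor above splits the IRQ bookkeeping in two: gen6_ring_get_irq()/gen6_ring_put_irq() now take both the bit in the shared GT IMR (gflag, forwarded to ironlake_enable_irq()/ironlake_disable_irq()) and the bit in the ring's own IMR (rflag, applied via I915_WRITE_IMR()), while the pre-gen6 BSD ring gets its own open-coded helpers. A minimal sketch of how a per-ring caller would pass both flags; the interrupt-bit names here are illustrative assumptions, not taken from this diff:

static bool
gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
{
	/* gflag: bit in the shared GT IMR/IER; rflag: bit in this ring's IMR */
	return gen6_ring_get_irq(ring,
				 GT_GEN6_BSD_USER_INTERRUPT,
				 GEN6_BSD_USER_INTERRUPT);
}

static void
gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
{
	gen6_ring_put_irq(ring,
			  GT_GEN6_BSD_USER_INTERRUPT,
			  GEN6_BSD_USER_INTERRUPT);
}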
@@ -759,7 +776,7 @@ static int init_status_page(struct intel_ring_buffer *ring)
 		ret = -ENOMEM;
 		goto err;
 	}
-	obj->agp_type = AGP_USER_CACHED_MEMORY;
+	obj->cache_level = I915_CACHE_LLC;
 
 	ret = i915_gem_object_pin(obj, 4096, true);
 	if (ret != 0) {
@@ -800,6 +817,7 @@ int intel_init_ring_buffer(struct drm_device *dev,
 	INIT_LIST_HEAD(&ring->request_list);
 	INIT_LIST_HEAD(&ring->gpu_write_list);
 
+	init_waitqueue_head(&ring->irq_queue);
 	spin_lock_init(&ring->irq_lock);
 	ring->irq_mask = ~0;
 
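The wait queue initialized above is what lets request waiters sleep until the ring's user interrupt fires. A simplified sketch of the usual pattern, not the driver's exact wait path (error handling omitted):

	/* waiter, e.g. blocking until a seqno has been retired */
	if (ring->irq_get(ring)) {
		wait_event_interruptible(ring->irq_queue,
					 i915_seqno_passed(ring->get_seqno(ring),
							   seqno));
		ring->irq_put(ring);
	}

	/* interrupt handler, on a user interrupt for this ring */
	wake_up_all(&ring->irq_queue);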
@@ -872,7 +890,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
 
 	/* Disable the ring buffer. The ring must be idle at this point */
 	dev_priv = ring->dev->dev_private;
-	ret = intel_wait_ring_buffer(ring, ring->size - 8);
+	ret = intel_wait_ring_idle(ring);
 	if (ret)
 		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
 			  ring->name, ret);
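intel_wait_ring_idle() presumably wraps the old intel_wait_ring_buffer(ring, ring->size - 8) call so that "wait for the ring to drain" is spelled once rather than repeating the size arithmetic at each call site. A sketch of what such a helper in intel_ringbuffer.h could look like (an assumption; the header is not part of this diff):

static inline int intel_wait_ring_idle(struct intel_ring_buffer *ring)
{
	/* idle == the whole ring is free, minus the 8-byte head/tail gap */
	return intel_wait_ring_buffer(ring, ring->size - 8);
}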
@@ -1333,7 +1351,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct intel_ring_buffer *ring = &dev_priv->ring[VCS];
 
-	if (IS_GEN6(dev))
+	if (IS_GEN6(dev) || IS_GEN7(dev))
 		*ring = gen6_bsd_ring;
 	else
 		*ring = bsd_ring;