Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c  111
1 file changed, 64 insertions(+), 47 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 26362f8495a..cb3508f78bc 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -33,18 +33,35 @@
 #include "i915_drm.h"
 #include "i915_trace.h"
 
+static u32 i915_gem_get_seqno(struct drm_device *dev)
+{
+        drm_i915_private_t *dev_priv = dev->dev_private;
+        u32 seqno;
+
+        seqno = dev_priv->next_seqno;
+
+        /* reserve 0 for non-seqno */
+        if (++dev_priv->next_seqno == 0)
+                dev_priv->next_seqno = 1;
+
+        return seqno;
+}
+
 static void
 render_ring_flush(struct drm_device *dev,
                   struct intel_ring_buffer *ring,
                   u32 invalidate_domains,
                   u32 flush_domains)
 {
+        drm_i915_private_t *dev_priv = dev->dev_private;
+        u32 cmd;
+
 #if WATCH_EXEC
         DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
                  invalidate_domains, flush_domains);
 #endif
-        u32 cmd;
-        trace_i915_gem_request_flush(dev, ring->next_seqno,
+
+        trace_i915_gem_request_flush(dev, dev_priv->next_seqno,
                                      invalidate_domains, flush_domains);
 
         if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
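
The new i915_gem_get_seqno() replaces the per-ring next_seqno counters (removed further down in this diff) with one device-wide counter, so sequence numbers stay globally ordered across the render and BSD rings. Zero is reserved to mean "no seqno", so the counter wraps from 0xffffffff to 1 rather than to 0. A minimal stand-alone sketch of that wrap behaviour (a hypothetical test harness, not driver code):

#include <stdio.h>
#include <stdint.h>

static uint32_t next_seqno = 0xffffffffu;      /* force an imminent wrap */

static uint32_t get_seqno(void)
{
        uint32_t seqno = next_seqno;

        /* reserve 0 for non-seqno, as in i915_gem_get_seqno() */
        if (++next_seqno == 0)
                next_seqno = 1;
        return seqno;
}

int main(void)
{
        uint32_t a = get_seqno();
        uint32_t b = get_seqno();
        uint32_t c = get_seqno();

        printf("%u %u %u\n", a, b, c);  /* 4294967295 1 2 -- 0 is skipped */
        return 0;
}
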
@@ -203,9 +220,13 @@ static int init_render_ring(struct drm_device *dev,
 {
         drm_i915_private_t *dev_priv = dev->dev_private;
         int ret = init_ring_common(dev, ring);
+        int mode;
+
         if (IS_I9XX(dev) && !IS_GEN3(dev)) {
-                I915_WRITE(MI_MODE,
-                           (VS_TIMER_DISPATCH) << 16 | VS_TIMER_DISPATCH);
+                mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
+                if (IS_GEN6(dev))
+                        mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
+                I915_WRITE(MI_MODE, mode);
         }
         return ret;
 }
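
MI_MODE is a "masked" register: the high 16 bits of the write select which of the low 16 bits actually get updated, which is why each bit is written as bit << 16 | bit. A hedged sketch of the general pattern (the helper names are illustrative, not part of this patch):

/* Masked-register write pattern: the high word is a write-enable
 * mask for the corresponding low-word bits, so unrelated bits in
 * the register are left untouched by the write. */
#define MASKED_BIT_ENABLE(bit)  ((bit) << 16 | (bit))
#define MASKED_BIT_DISABLE(bit) ((bit) << 16)

/* e.g. the MI_MODE write above could be spelled as:
 *        mode = MASKED_BIT_ENABLE(VS_TIMER_DISPATCH);
 *        if (IS_GEN6(dev))
 *                mode |= MASKED_BIT_ENABLE(MI_FLUSH_ENABLE);
 */
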
@@ -233,9 +254,10 @@ render_ring_add_request(struct drm_device *dev,
233 struct drm_file *file_priv, 254 struct drm_file *file_priv,
234 u32 flush_domains) 255 u32 flush_domains)
235{ 256{
236 u32 seqno;
237 drm_i915_private_t *dev_priv = dev->dev_private; 257 drm_i915_private_t *dev_priv = dev->dev_private;
238 seqno = intel_ring_get_seqno(dev, ring); 258 u32 seqno;
259
260 seqno = i915_gem_get_seqno(dev);
239 261
240 if (IS_GEN6(dev)) { 262 if (IS_GEN6(dev)) {
241 BEGIN_LP_RING(6); 263 BEGIN_LP_RING(6);
@@ -405,7 +427,9 @@ bsd_ring_add_request(struct drm_device *dev,
                      u32 flush_domains)
 {
         u32 seqno;
-        seqno = intel_ring_get_seqno(dev, ring);
+
+        seqno = i915_gem_get_seqno(dev);
+
         intel_ring_begin(dev, ring, 4);
         intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
         intel_ring_emit(dev, ring,
@@ -479,7 +503,7 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
         exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
         exec_len = (uint32_t) exec->batch_len;
 
-        trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno + 1);
+        trace_i915_gem_request_submit(dev, dev_priv->next_seqno + 1);
 
         count = nbox ? nbox : 1;
 
@@ -515,7 +539,16 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
                 intel_ring_advance(dev, ring);
         }
 
+        if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
+                intel_ring_begin(dev, ring, 2);
+                intel_ring_emit(dev, ring, MI_FLUSH |
+                                MI_NO_WRITE_FLUSH |
+                                MI_INVALIDATE_ISP);
+                intel_ring_emit(dev, ring, MI_NOOP);
+                intel_ring_advance(dev, ring);
+        }
         /* XXX breadcrumb */
+
         return 0;
 }
 
@@ -588,9 +621,10 @@ err:
 int intel_init_ring_buffer(struct drm_device *dev,
                            struct intel_ring_buffer *ring)
 {
-        int ret;
         struct drm_i915_gem_object *obj_priv;
         struct drm_gem_object *obj;
+        int ret;
+
         ring->dev = dev;
 
         if (I915_NEED_GFX_HWS(dev)) {
@@ -603,16 +637,14 @@ int intel_init_ring_buffer(struct drm_device *dev,
         if (obj == NULL) {
                 DRM_ERROR("Failed to allocate ringbuffer\n");
                 ret = -ENOMEM;
-                goto cleanup;
+                goto err_hws;
         }
 
         ring->gem_object = obj;
 
         ret = i915_gem_object_pin(obj, ring->alignment);
-        if (ret != 0) {
-                drm_gem_object_unreference(obj);
-                goto cleanup;
-        }
+        if (ret)
+                goto err_unref;
 
         obj_priv = to_intel_bo(obj);
         ring->map.size = ring->size;
@@ -624,18 +656,14 @@ int intel_init_ring_buffer(struct drm_device *dev,
         drm_core_ioremap_wc(&ring->map, dev);
         if (ring->map.handle == NULL) {
                 DRM_ERROR("Failed to map ringbuffer.\n");
-                i915_gem_object_unpin(obj);
-                drm_gem_object_unreference(obj);
                 ret = -EINVAL;
-                goto cleanup;
+                goto err_unpin;
         }
 
         ring->virtual_start = ring->map.handle;
         ret = ring->init(dev, ring);
-        if (ret != 0) {
-                intel_cleanup_ring_buffer(dev, ring);
-                return ret;
-        }
+        if (ret)
+                goto err_unmap;
 
         if (!drm_core_check_feature(dev, DRIVER_MODESET))
                 i915_kernel_lost_context(dev);
@@ -649,7 +677,15 @@ int intel_init_ring_buffer(struct drm_device *dev,
         INIT_LIST_HEAD(&ring->active_list);
         INIT_LIST_HEAD(&ring->request_list);
         return ret;
-cleanup:
+
+err_unmap:
+        drm_core_ioremapfree(&ring->map, dev);
+err_unpin:
+        i915_gem_object_unpin(obj);
+err_unref:
+        drm_gem_object_unreference(obj);
+        ring->gem_object = NULL;
+err_hws:
         cleanup_status_page(dev, ring);
         return ret;
 }
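
The error paths are consolidated into a single unwind ladder: each err_* label releases exactly what was acquired before the jump, in reverse acquisition order, which removes the duplicated unpin/unreference calls and the call into intel_cleanup_ring_buffer() on a half-initialized ring. The idiom in stand-alone form (hypothetical helpers, not driver code):

#include <stdlib.h>

struct ring { void *obj; void *map; };

static int ring_setup(struct ring *r)
{
        int ret = -12;                  /* -ENOMEM */

        r->obj = malloc(4096);          /* stands in for the GEM allocation */
        if (r->obj == NULL)
                goto err;               /* nothing of ours to undo yet */

        r->map = malloc(4096);          /* stands in for pin + ioremap */
        if (r->map == NULL)
                goto err_obj;           /* undo only the step that succeeded */

        return 0;

err_obj:
        free(r->obj);
        r->obj = NULL;
err:
        return ret;
}
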
@@ -682,9 +718,11 @@ int intel_wrap_ring_buffer(struct drm_device *dev,
         }
 
         virt = (unsigned int *)(ring->virtual_start + ring->tail);
-        rem /= 4;
-        while (rem--)
+        rem /= 8;
+        while (rem--) {
+                *virt++ = MI_NOOP;
                 *virt++ = MI_NOOP;
+        }
 
         ring->tail = 0;
         ring->space = ring->head - 8;
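
The wrap path now pads the ring two MI_NOOPs per iteration (rem /= 8 counts qwords instead of dwords). That is byte-for-byte the same fill as the old one-dword loop only when the remaining space is a multiple of 8, i.e. it assumes the tail stays qword-aligned. A small stand-alone model of the new loop, under that assumption:

#include <assert.h>
#include <stdint.h>

#define MI_NOOP 0       /* the i915 no-op command is an all-zero dword */

static void pad_to_end(uint32_t *virt, unsigned int rem /* bytes */)
{
        assert(rem % 8 == 0);   /* the two-per-pass loop relies on this */
        rem /= 8;
        while (rem--) {
                *virt++ = MI_NOOP;
                *virt++ = MI_NOOP;
        }
}
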
@@ -729,21 +767,14 @@ void intel_ring_begin(struct drm_device *dev,
                 intel_wrap_ring_buffer(dev, ring);
         if (unlikely(ring->space < n))
                 intel_wait_ring_buffer(dev, ring, n);
-}
 
-void intel_ring_emit(struct drm_device *dev,
-                     struct intel_ring_buffer *ring, unsigned int data)
-{
-        unsigned int *virt = ring->virtual_start + ring->tail;
-        *virt = data;
-        ring->tail += 4;
-        ring->tail &= ring->size - 1;
-        ring->space -= 4;
+        ring->space -= n;
 }
 
 void intel_ring_advance(struct drm_device *dev,
                         struct intel_ring_buffer *ring)
 {
+        ring->tail &= ring->size - 1;
         ring->advance_ring(dev, ring);
 }
 
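
With intel_ring_begin() now charging the full reservation up front (ring->space -= n) and intel_ring_advance() masking the tail once per batch of emits, intel_ring_emit() no longer needs any bookkeeping of its own; presumably it moves to intel_ringbuffer.h as a trivial inline. A sketch under that assumption (not part of this diff):

/* Hypothetical companion change in intel_ringbuffer.h: emit is
 * reduced to a plain store plus a tail bump. */
static inline void intel_ring_emit(struct drm_device *dev,
                                   struct intel_ring_buffer *ring,
                                   unsigned int data)
{
        unsigned int *virt = ring->virtual_start + ring->tail;

        *virt = data;
        ring->tail += 4;
}
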
@@ -762,18 +793,6 @@ void intel_fill_struct(struct drm_device *dev,
         intel_ring_advance(dev, ring);
 }
 
-u32 intel_ring_get_seqno(struct drm_device *dev,
-                struct intel_ring_buffer *ring)
-{
-        u32 seqno;
-        seqno = ring->next_seqno;
-
-        /* reserve 0 for non-seqno */
-        if (++ring->next_seqno == 0)
-                ring->next_seqno = 1;
-        return seqno;
-}
-
 struct intel_ring_buffer render_ring = {
         .name = "render ring",
         .regs = {
@@ -791,7 +810,6 @@ struct intel_ring_buffer render_ring = {
         .head = 0,
         .tail = 0,
         .space = 0,
-        .next_seqno = 1,
         .user_irq_refcount = 0,
         .irq_gem_seqno = 0,
         .waiting_gem_seqno = 0,
@@ -830,7 +848,6 @@ struct intel_ring_buffer bsd_ring = {
         .head = 0,
         .tail = 0,
         .space = 0,
-        .next_seqno = 1,
         .user_irq_refcount = 0,
         .irq_gem_seqno = 0,
         .waiting_gem_seqno = 0,