diff options
Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r-- | drivers/gpu/drm/i915/intel_ringbuffer.c | 30 |
1 file changed, 11 insertions, 19 deletions
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 445f27efe67..789c47801ba 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
@@ -62,18 +62,9 @@ render_ring_flush(struct intel_ring_buffer *ring, | |||
62 | u32 flush_domains) | 62 | u32 flush_domains) |
63 | { | 63 | { |
64 | struct drm_device *dev = ring->dev; | 64 | struct drm_device *dev = ring->dev; |
65 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
66 | u32 cmd; | 65 | u32 cmd; |
67 | int ret; | 66 | int ret; |
68 | 67 | ||
69 | #if WATCH_EXEC | ||
70 | DRM_INFO("%s: invalidate %08x flush %08x\n", __func__, | ||
71 | invalidate_domains, flush_domains); | ||
72 | #endif | ||
73 | |||
74 | trace_i915_gem_request_flush(dev, dev_priv->next_seqno, | ||
75 | invalidate_domains, flush_domains); | ||
76 | |||
77 | if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) { | 68 | if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) { |
78 | /* | 69 | /* |
79 | * read/write caches: | 70 | * read/write caches: |
@@ -122,9 +113,6 @@ render_ring_flush(struct intel_ring_buffer *ring, | |||
122 | (IS_G4X(dev) || IS_GEN5(dev))) | 113 | (IS_G4X(dev) || IS_GEN5(dev))) |
123 | cmd |= MI_INVALIDATE_ISP; | 114 | cmd |= MI_INVALIDATE_ISP; |
124 | 115 | ||
125 | #if WATCH_EXEC | ||
126 | DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd); | ||
127 | #endif | ||
128 | ret = intel_ring_begin(ring, 2); | 116 | ret = intel_ring_begin(ring, 2); |
129 | if (ret) | 117 | if (ret) |
130 | return ret; | 118 | return ret; |
@@ -612,7 +600,6 @@ ring_add_request(struct intel_ring_buffer *ring, | |||
612 | intel_ring_emit(ring, MI_USER_INTERRUPT); | 600 | intel_ring_emit(ring, MI_USER_INTERRUPT); |
613 | intel_ring_advance(ring); | 601 | intel_ring_advance(ring); |
614 | 602 | ||
615 | DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno); | ||
616 | *result = seqno; | 603 | *result = seqno; |
617 | return 0; | 604 | return 0; |
618 | } | 605 | } |
@@ -715,11 +702,8 @@ render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, | |||
715 | u32 offset, u32 len) | 702 | u32 offset, u32 len) |
716 | { | 703 | { |
717 | struct drm_device *dev = ring->dev; | 704 | struct drm_device *dev = ring->dev; |
718 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
719 | int ret; | 705 | int ret; |
720 | 706 | ||
721 | trace_i915_gem_request_submit(dev, dev_priv->next_seqno + 1); | ||
722 | |||
723 | if (IS_I830(dev) || IS_845G(dev)) { | 707 | if (IS_I830(dev) || IS_845G(dev)) { |
724 | ret = intel_ring_begin(ring, 4); | 708 | ret = intel_ring_begin(ring, 4); |
725 | if (ret) | 709 | if (ret) |
@@ -894,6 +878,10 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring) | |||
894 | /* Disable the ring buffer. The ring must be idle at this point */ | 878 | /* Disable the ring buffer. The ring must be idle at this point */ |
895 | dev_priv = ring->dev->dev_private; | 879 | dev_priv = ring->dev->dev_private; |
896 | ret = intel_wait_ring_buffer(ring, ring->size - 8); | 880 | ret = intel_wait_ring_buffer(ring, ring->size - 8); |
881 | if (ret) | ||
882 | DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n", | ||
883 | ring->name, ret); | ||
884 | |||
897 | I915_WRITE_CTL(ring, 0); | 885 | I915_WRITE_CTL(ring, 0); |
898 | 886 | ||
899 | drm_core_ioremapfree(&ring->map, ring->dev); | 887 | drm_core_ioremapfree(&ring->map, ring->dev); |
@@ -950,13 +938,13 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n) | |||
950 | return 0; | 938 | return 0; |
951 | } | 939 | } |
952 | 940 | ||
953 | trace_i915_ring_wait_begin (dev); | 941 | trace_i915_ring_wait_begin(ring); |
954 | end = jiffies + 3 * HZ; | 942 | end = jiffies + 3 * HZ; |
955 | do { | 943 | do { |
956 | ring->head = I915_READ_HEAD(ring); | 944 | ring->head = I915_READ_HEAD(ring); |
957 | ring->space = ring_space(ring); | 945 | ring->space = ring_space(ring); |
958 | if (ring->space >= n) { | 946 | if (ring->space >= n) { |
959 | trace_i915_ring_wait_end(dev); | 947 | trace_i915_ring_wait_end(ring); |
960 | return 0; | 948 | return 0; |
961 | } | 949 | } |
962 | 950 | ||
@@ -970,16 +958,20 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n) | |||
970 | if (atomic_read(&dev_priv->mm.wedged)) | 958 | if (atomic_read(&dev_priv->mm.wedged)) |
971 | return -EAGAIN; | 959 | return -EAGAIN; |
972 | } while (!time_after(jiffies, end)); | 960 | } while (!time_after(jiffies, end)); |
973 | trace_i915_ring_wait_end (dev); | 961 | trace_i915_ring_wait_end(ring); |
974 | return -EBUSY; | 962 | return -EBUSY; |
975 | } | 963 | } |
976 | 964 | ||
977 | int intel_ring_begin(struct intel_ring_buffer *ring, | 965 | int intel_ring_begin(struct intel_ring_buffer *ring, |
978 | int num_dwords) | 966 | int num_dwords) |
979 | { | 967 | { |
968 | struct drm_i915_private *dev_priv = ring->dev->dev_private; | ||
980 | int n = 4*num_dwords; | 969 | int n = 4*num_dwords; |
981 | int ret; | 970 | int ret; |
982 | 971 | ||
972 | if (unlikely(atomic_read(&dev_priv->mm.wedged))) | ||
973 | return -EIO; | ||
974 | |||
983 | if (unlikely(ring->tail + n > ring->effective_size)) { | 975 | if (unlikely(ring->tail + n > ring->effective_size)) { |
984 | ret = intel_wrap_ring_buffer(ring); | 976 | ret = intel_wrap_ring_buffer(ring); |
985 | if (unlikely(ret)) | 977 | if (unlikely(ret)) |