about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c4
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c12
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c86
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h6
4 files changed, 65 insertions, 43 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index f9c093c08d58..07b62449b9e1 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2148,8 +2148,8 @@ i915_gem_flush_ring(struct drm_device *dev,
2148 uint32_t invalidate_domains, 2148 uint32_t invalidate_domains,
2149 uint32_t flush_domains) 2149 uint32_t flush_domains)
2150{ 2150{
2151 ring->flush(ring, invalidate_domains, flush_domains); 2151 if (ring->flush(ring, invalidate_domains, flush_domains) == 0)
2152 i915_gem_process_flushing_list(dev, flush_domains, ring); 2152 i915_gem_process_flushing_list(dev, flush_domains, ring);
2153} 2153}
2154 2154
2155static int i915_ring_idle(struct drm_device *dev, 2155static int i915_ring_idle(struct drm_device *dev,
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 0d42de42868c..1b78b66dd77e 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -924,7 +924,7 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev,
924 struct intel_ring_buffer *ring) 924 struct intel_ring_buffer *ring)
925{ 925{
926 struct drm_i915_gem_request *request; 926 struct drm_i915_gem_request *request;
927 u32 flush_domains; 927 u32 invalidate;
928 928
929 /* 929 /*
930 * Ensure that the commands in the batch buffer are 930 * Ensure that the commands in the batch buffer are
@@ -932,11 +932,13 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev,
932 * 932 *
933 * The sampler always gets flushed on i965 (sigh). 933 * The sampler always gets flushed on i965 (sigh).
934 */ 934 */
935 flush_domains = 0; 935 invalidate = I915_GEM_DOMAIN_COMMAND;
936 if (INTEL_INFO(dev)->gen >= 4) 936 if (INTEL_INFO(dev)->gen >= 4)
937 flush_domains |= I915_GEM_DOMAIN_SAMPLER; 937 invalidate |= I915_GEM_DOMAIN_SAMPLER;
938 938 if (ring->flush(ring, invalidate, 0)) {
939 ring->flush(ring, I915_GEM_DOMAIN_COMMAND, flush_domains); 939 i915_gem_next_request_seqno(dev, ring);
940 return;
941 }
940 942
941 /* Add a breadcrumb for the completion of the batch buffer */ 943 /* Add a breadcrumb for the completion of the batch buffer */
942 request = kzalloc(sizeof(*request), GFP_KERNEL); 944 request = kzalloc(sizeof(*request), GFP_KERNEL);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 2de0e45464c5..aa8f6abf16f2 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -48,7 +48,7 @@ static u32 i915_gem_get_seqno(struct drm_device *dev)
48 return seqno; 48 return seqno;
49} 49}
50 50
51static void 51static int
52render_ring_flush(struct intel_ring_buffer *ring, 52render_ring_flush(struct intel_ring_buffer *ring,
53 u32 invalidate_domains, 53 u32 invalidate_domains,
54 u32 flush_domains) 54 u32 flush_domains)
@@ -56,6 +56,7 @@ render_ring_flush(struct intel_ring_buffer *ring,
56 struct drm_device *dev = ring->dev; 56 struct drm_device *dev = ring->dev;
57 drm_i915_private_t *dev_priv = dev->dev_private; 57 drm_i915_private_t *dev_priv = dev->dev_private;
58 u32 cmd; 58 u32 cmd;
59 int ret;
59 60
60#if WATCH_EXEC 61#if WATCH_EXEC
61 DRM_INFO("%s: invalidate %08x flush %08x\n", __func__, 62 DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
@@ -116,12 +117,16 @@ render_ring_flush(struct intel_ring_buffer *ring,
116#if WATCH_EXEC 117#if WATCH_EXEC
117 DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd); 118 DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
118#endif 119#endif
119 if (intel_ring_begin(ring, 2) == 0) { 120 ret = intel_ring_begin(ring, 2);
120 intel_ring_emit(ring, cmd); 121 if (ret)
121 intel_ring_emit(ring, MI_NOOP); 122 return ret;
122 intel_ring_advance(ring); 123
123 } 124 intel_ring_emit(ring, cmd);
125 intel_ring_emit(ring, MI_NOOP);
126 intel_ring_advance(ring);
124 } 127 }
128
129 return 0;
125} 130}
126 131
127static void ring_write_tail(struct intel_ring_buffer *ring, 132static void ring_write_tail(struct intel_ring_buffer *ring,
@@ -534,19 +539,24 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
534 POSTING_READ(mmio); 539 POSTING_READ(mmio);
535} 540}
536 541
537static void 542static int
538bsd_ring_flush(struct intel_ring_buffer *ring, 543bsd_ring_flush(struct intel_ring_buffer *ring,
539 u32 invalidate_domains, 544 u32 invalidate_domains,
540 u32 flush_domains) 545 u32 flush_domains)
541{ 546{
547 int ret;
548
542 if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0) 549 if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
543 return; 550 return 0;
544 551
545 if (intel_ring_begin(ring, 2) == 0) { 552 ret = intel_ring_begin(ring, 2);
546 intel_ring_emit(ring, MI_FLUSH); 553 if (ret)
547 intel_ring_emit(ring, MI_NOOP); 554 return ret;
548 intel_ring_advance(ring); 555
549 } 556 intel_ring_emit(ring, MI_FLUSH);
557 intel_ring_emit(ring, MI_NOOP);
558 intel_ring_advance(ring);
559 return 0;
550} 560}
551 561
552static int 562static int
@@ -980,20 +990,25 @@ static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
980 GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE); 990 GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
981} 991}
982 992
983static void gen6_ring_flush(struct intel_ring_buffer *ring, 993static int gen6_ring_flush(struct intel_ring_buffer *ring,
984 u32 invalidate_domains, 994 u32 invalidate_domains,
985 u32 flush_domains) 995 u32 flush_domains)
986{ 996{
997 int ret;
998
987 if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0) 999 if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
988 return; 1000 return 0;
989 1001
990 if (intel_ring_begin(ring, 4) == 0) { 1002 ret = intel_ring_begin(ring, 4);
991 intel_ring_emit(ring, MI_FLUSH_DW); 1003 if (ret)
992 intel_ring_emit(ring, 0); 1004 return ret;
993 intel_ring_emit(ring, 0); 1005
994 intel_ring_emit(ring, 0); 1006 intel_ring_emit(ring, MI_FLUSH_DW);
995 intel_ring_advance(ring); 1007 intel_ring_emit(ring, 0);
996 } 1008 intel_ring_emit(ring, 0);
1009 intel_ring_emit(ring, 0);
1010 intel_ring_advance(ring);
1011 return 0;
997} 1012}
998 1013
999static int 1014static int
@@ -1122,20 +1137,25 @@ static int blt_ring_begin(struct intel_ring_buffer *ring,
1122 return intel_ring_begin(ring, 4); 1137 return intel_ring_begin(ring, 4);
1123} 1138}
1124 1139
1125static void blt_ring_flush(struct intel_ring_buffer *ring, 1140static int blt_ring_flush(struct intel_ring_buffer *ring,
1126 u32 invalidate_domains, 1141 u32 invalidate_domains,
1127 u32 flush_domains) 1142 u32 flush_domains)
1128{ 1143{
1144 int ret;
1145
1129 if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0) 1146 if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
1130 return; 1147 return 0;
1131 1148
1132 if (blt_ring_begin(ring, 4) == 0) { 1149 ret = blt_ring_begin(ring, 4);
1133 intel_ring_emit(ring, MI_FLUSH_DW); 1150 if (ret)
1134 intel_ring_emit(ring, 0); 1151 return ret;
1135 intel_ring_emit(ring, 0); 1152
1136 intel_ring_emit(ring, 0); 1153 intel_ring_emit(ring, MI_FLUSH_DW);
1137 intel_ring_advance(ring); 1154 intel_ring_emit(ring, 0);
1138 } 1155 intel_ring_emit(ring, 0);
1156 intel_ring_emit(ring, 0);
1157 intel_ring_advance(ring);
1158 return 0;
1139} 1159}
1140 1160
1141static void blt_ring_cleanup(struct intel_ring_buffer *ring) 1161static void blt_ring_cleanup(struct intel_ring_buffer *ring)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index bbbf505c8b56..5969c2ed1028 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -63,9 +63,9 @@ struct intel_ring_buffer {
63 63
64 void (*write_tail)(struct intel_ring_buffer *ring, 64 void (*write_tail)(struct intel_ring_buffer *ring,
65 u32 value); 65 u32 value);
66 void (*flush)(struct intel_ring_buffer *ring, 66 int __must_check (*flush)(struct intel_ring_buffer *ring,
67 u32 invalidate_domains, 67 u32 invalidate_domains,
68 u32 flush_domains); 68 u32 flush_domains);
69 int (*add_request)(struct intel_ring_buffer *ring, 69 int (*add_request)(struct intel_ring_buffer *ring,
70 u32 *seqno); 70 u32 *seqno);
71 u32 (*get_seqno)(struct intel_ring_buffer *ring); 71 u32 (*get_seqno)(struct intel_ring_buffer *ring);