author     Chris Wilson <chris@chris-wilson.co.uk>    2012-11-27 11:22:54 -0500
committer  Daniel Vetter <daniel.vetter@ffwll.ch>     2012-11-29 05:43:53 -0500
commit     3e9605018ab3e333d51cc90fccfde2031886763b (patch)
tree       101b6600bbe9977dc0f9d296329d03158eba1075 /drivers/gpu
parent     b662a0663230853fccdfceeda5db031f5d4b657c (diff)
drm/i915: Rearrange code to only have a single method for waiting upon the ring
Replace the wait for the ring to be clear with the more common wait for
the ring to be idle. The principal advantage is one less exported
intel_ring_wait function, and the removal of a hardcoded value.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
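For illustration, a minimal sketch of the call-site change (example_quiesce() is a hypothetical caller; the helper names and the ring->size - 8 threshold are taken from the diff below):

	static int example_quiesce(struct intel_ring_buffer *ring)
	{
		/* Before this patch: drain the ring through the exported wait
		 * helper, which hid a hardcoded threshold:
		 *
		 *	return intel_wait_ring_idle(ring);
		 *	       == intel_wait_ring_buffer(ring, ring->size - 8)
		 *
		 * After this patch: a single helper that flushes any outstanding
		 * lazy request and then waits upon the seqno of the last request.
		 */
		return intel_ring_idle(ring);
	}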
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c           4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c          25
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c           8
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c  73
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h   9
5 files changed, 58 insertions, 61 deletions
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 4ea331b931f..80ed75117b6 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -592,10 +592,8 @@ static int i915_dispatch_flip(struct drm_device * dev)
 
 static int i915_quiescent(struct drm_device *dev)
 {
-	struct intel_ring_buffer *ring = LP_RING(dev->dev_private);
-
 	i915_kernel_lost_context(dev);
-	return intel_wait_ring_idle(ring);
+	return intel_ring_idle(LP_RING(dev->dev_private));
 }
 
 static int i915_flush_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index e594435eec9..85a09482c2a 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2480,29 +2480,6 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 	return 0;
 }
 
-static int i915_ring_idle(struct intel_ring_buffer *ring)
-{
-	u32 seqno;
-	int ret;
-
-	/* We need to add any requests required to flush the objects and ring */
-	if (ring->outstanding_lazy_request) {
-		ret = i915_add_request(ring, NULL, NULL);
-		if (ret)
-			return ret;
-	}
-
-	/* Wait upon the last request to be completed */
-	if (list_empty(&ring->request_list))
-		return 0;
-
-	seqno = list_entry(ring->request_list.prev,
-			   struct drm_i915_gem_request,
-			   list)->seqno;
-
-	return i915_wait_seqno(ring, seqno);
-}
-
 int i915_gpu_idle(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
@@ -2515,7 +2492,7 @@ int i915_gpu_idle(struct drm_device *dev)
 		if (ret)
 			return ret;
 
-		ret = i915_ring_idle(ring);
+		ret = intel_ring_idle(ring);
 		if (ret)
 			return ret;
 	}
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 58c2f210154..f595b8d56cc 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2653,6 +2653,7 @@ static void ironlake_enable_rc6(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+	bool was_interruptible;
 	int ret;
 
 	/* rc6 disabled by default due to repeated reports of hanging during
@@ -2667,6 +2668,9 @@ static void ironlake_enable_rc6(struct drm_device *dev)
 	if (ret)
 		return;
 
+	was_interruptible = dev_priv->mm.interruptible;
+	dev_priv->mm.interruptible = false;
+
 	/*
 	 * GPU can automatically power down the render unit if given a page
 	 * to save state.
@@ -2674,6 +2678,7 @@ static void ironlake_enable_rc6(struct drm_device *dev)
 	ret = intel_ring_begin(ring, 6);
 	if (ret) {
 		ironlake_teardown_rc6(dev);
+		dev_priv->mm.interruptible = was_interruptible;
 		return;
 	}
 
@@ -2694,7 +2699,8 @@ static void ironlake_enable_rc6(struct drm_device *dev)
 	 * does an implicit flush, combined with MI_FLUSH above, it should be
 	 * safe to assume that renderctx is valid
 	 */
-	ret = intel_wait_ring_idle(ring);
+	ret = intel_ring_idle(ring);
+	dev_priv->mm.interruptible = was_interruptible;
 	if (ret) {
 		DRM_ERROR("failed to enable ironlake power power savings\n");
 		ironlake_teardown_rc6(dev);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index e4682cdc00b..bc7cf7c6310 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1175,7 +1175,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
 
 	/* Disable the ring buffer. The ring must be idle at this point */
 	dev_priv = ring->dev->dev_private;
-	ret = intel_wait_ring_idle(ring);
+	ret = intel_ring_idle(ring);
 	if (ret)
 		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
 			  ring->name, ret);
@@ -1194,28 +1194,6 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
 	cleanup_status_page(ring);
 }
 
-static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
-{
-	uint32_t __iomem *virt;
-	int rem = ring->size - ring->tail;
-
-	if (ring->space < rem) {
-		int ret = intel_wait_ring_buffer(ring, rem);
-		if (ret)
-			return ret;
-	}
-
-	virt = ring->virtual_start + ring->tail;
-	rem /= 4;
-	while (rem--)
-		iowrite32(MI_NOOP, virt++);
-
-	ring->tail = 0;
-	ring->space = ring_space(ring);
-
-	return 0;
-}
-
 static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
 {
 	int ret;
@@ -1284,7 +1262,7 @@ static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
 	return 0;
 }
 
-int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
+static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
 {
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1327,6 +1305,51 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
 	return -EBUSY;
 }
 
+static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
+{
+	uint32_t __iomem *virt;
+	int rem = ring->size - ring->tail;
+
+	if (ring->space < rem) {
+		int ret = ring_wait_for_space(ring, rem);
+		if (ret)
+			return ret;
+	}
+
+	virt = ring->virtual_start + ring->tail;
+	rem /= 4;
+	while (rem--)
+		iowrite32(MI_NOOP, virt++);
+
+	ring->tail = 0;
+	ring->space = ring_space(ring);
+
+	return 0;
+}
+
+int intel_ring_idle(struct intel_ring_buffer *ring)
+{
+	u32 seqno;
+	int ret;
+
+	/* We need to add any requests required to flush the objects and ring */
+	if (ring->outstanding_lazy_request) {
+		ret = i915_add_request(ring, NULL, NULL);
+		if (ret)
+			return ret;
+	}
+
+	/* Wait upon the last request to be completed */
+	if (list_empty(&ring->request_list))
+		return 0;
+
+	seqno = list_entry(ring->request_list.prev,
+			   struct drm_i915_gem_request,
+			   list)->seqno;
+
+	return i915_wait_seqno(ring, seqno);
+}
+
 static int
 intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
 {
@@ -1359,7 +1382,7 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
 	}
 
 	if (unlikely(ring->space < n)) {
-		ret = intel_wait_ring_buffer(ring, n);
+		ret = ring_wait_for_space(ring, n);
 		if (unlikely(ret))
 			return ret;
 	}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 0e613026d00..d4b7416fa1b 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -187,22 +187,15 @@ intel_read_status_page(struct intel_ring_buffer *ring,
 
 void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
 
-int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);
-static inline int intel_wait_ring_idle(struct intel_ring_buffer *ring)
-{
-	return intel_wait_ring_buffer(ring, ring->size - 8);
-}
-
 int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
-
 static inline void intel_ring_emit(struct intel_ring_buffer *ring,
 				   u32 data)
 {
 	iowrite32(data, ring->virtual_start + ring->tail);
 	ring->tail += 4;
 }
-
 void intel_ring_advance(struct intel_ring_buffer *ring);
+int __must_check intel_ring_idle(struct intel_ring_buffer *ring);
 
 int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
 int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);