author		Oscar Mateo <oscar.mateo@intel.com>	2014-05-22 09:13:35 -0400
committer	Daniel Vetter <daniel.vetter@ffwll.ch>	2014-05-22 17:27:25 -0400
commit		ee1b1e5ef38d22e2447b48b8456a2b2bcf438e65 (patch)
tree		0cdef19ebed891d07b9b23040f461d4bda9d1c1a /drivers/gpu
parent		8ee149756e4fbaf4462cf3f7377456ec5fff8b63 (diff)
drm/i915: Split the ringbuffers from the rings (2/3)
This refactoring has been performed using the following Coccinelle
semantic script:

@@
struct intel_engine_cs r;
@@
(
- (r).obj
+ r.buffer->obj
|
- (r).virtual_start
+ r.buffer->virtual_start
|
- (r).head
+ r.buffer->head
|
- (r).tail
+ r.buffer->tail
|
- (r).space
+ r.buffer->space
|
- (r).size
+ r.buffer->size
|
- (r).effective_size
+ r.buffer->effective_size
|
- (r).last_retired_head
+ r.buffer->last_retired_head
)

@@
struct intel_engine_cs *r;
@@
(
- (r)->obj
+ r->buffer->obj
|
- (r)->virtual_start
+ r->buffer->virtual_start
|
- (r)->head
+ r->buffer->head
|
- (r)->tail
+ r->buffer->tail
|
- (r)->space
+ r->buffer->space
|
- (r)->size
+ r->buffer->size
|
- (r)->effective_size
+ r->buffer->effective_size
|
- (r)->last_retired_head
+ r->buffer->last_retired_head
)

@@
expression E;
@@
(
- LP_RING(E)->obj
+ LP_RING(E)->buffer->obj
|
- LP_RING(E)->virtual_start
+ LP_RING(E)->buffer->virtual_start
|
- LP_RING(E)->head
+ LP_RING(E)->buffer->head
|
- LP_RING(E)->tail
+ LP_RING(E)->buffer->tail
|
- LP_RING(E)->space
+ LP_RING(E)->buffer->space
|
- LP_RING(E)->size
+ LP_RING(E)->buffer->size
|
- LP_RING(E)->effective_size
+ LP_RING(E)->buffer->effective_size
|
- LP_RING(E)->last_retired_head
+ LP_RING(E)->buffer->last_retired_head
)

Note: On top of this, this patch also removes the now unused ringbuffer
fields in intel_engine_cs.

Signed-off-by: Oscar Mateo <oscar.mateo@intel.com>
[danvet: Add note about fixup patch included here.]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
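The net effect of the script is that all per-engine ringbuffer bookkeeping
now lives behind the engine's buffer pointer. A minimal sketch of the
resulting layout, restricted to the fields named in the semantic patch
(the real definitions in intel_ringbuffer.h carry more members):

	/* Sketch only: fields limited to those the script touches. */
	struct intel_ringbuffer {
		struct drm_i915_gem_object *obj;	/* backing GEM object */
		void __iomem *virtual_start;		/* CPU mapping of the ring */
		u32 head;
		u32 tail;
		int space;
		int size;
		int effective_size;
		u32 last_retired_head;			/* set to -1 once consumed */
	};

	struct intel_engine_cs {
		/* ... engine state: irqs, hangcheck, request lists, ... */
		struct intel_ringbuffer *buffer;	/* ring storage now lives here */
	};

Accesses such as ring->tail therefore become ring->buffer->tail throughout
the driver, which is exactly what the hunks below do.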
Diffstat (limited to 'drivers/gpu')
-rw-r--r--	drivers/gpu/drm/i915/i915_dma.c			22
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c			 2
-rw-r--r--	drivers/gpu/drm/i915/i915_gpu_error.c		 6
-rw-r--r--	drivers/gpu/drm/i915/i915_irq.c			 8
-rw-r--r--	drivers/gpu/drm/i915/intel_ringbuffer.c		102
-rw-r--r--	drivers/gpu/drm/i915/intel_ringbuffer.h		27
6 files changed, 75 insertions(+), 92 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 13784fefa67d..234d454923b5 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -64,7 +64,7 @@
  * has access to the ring.
  */
 #define RING_LOCK_TEST_WITH_RETURN(dev, file) do {			\
-	if (LP_RING(dev->dev_private)->obj == NULL)			\
+	if (LP_RING(dev->dev_private)->buffer->obj == NULL)		\
 		LOCK_TEST_WITH_RETURN(dev, file);			\
 } while (0)
 
@@ -149,17 +149,17 @@ void i915_kernel_lost_context(struct drm_device * dev)
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		return;
 
-	ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
-	ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
-	ring->space = ring->head - (ring->tail + I915_RING_FREE_SPACE);
-	if (ring->space < 0)
-		ring->space += ring->size;
+	ring->buffer->head = I915_READ_HEAD(ring) & HEAD_ADDR;
+	ring->buffer->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
+	ring->buffer->space = ring->buffer->head - (ring->buffer->tail + I915_RING_FREE_SPACE);
+	if (ring->buffer->space < 0)
+		ring->buffer->space += ring->buffer->size;
 
 	if (!dev->primary->master)
 		return;
 
 	master_priv = dev->primary->master->driver_priv;
-	if (ring->head == ring->tail && master_priv->sarea_priv)
+	if (ring->buffer->head == ring->buffer->tail && master_priv->sarea_priv)
 		master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
 }
 
@@ -202,7 +202,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
 	}
 
 	if (init->ring_size != 0) {
-		if (LP_RING(dev_priv)->obj != NULL) {
+		if (LP_RING(dev_priv)->buffer->obj != NULL) {
 			i915_dma_cleanup(dev);
 			DRM_ERROR("Client tried to initialize ringbuffer in "
 				  "GEM mode\n");
@@ -239,7 +239,7 @@ static int i915_dma_resume(struct drm_device * dev)
 
 	DRM_DEBUG_DRIVER("%s\n", __func__);
 
-	if (ring->virtual_start == NULL) {
+	if (ring->buffer->virtual_start == NULL) {
 		DRM_ERROR("can not ioremap virtual address for"
 			  " ring buffer\n");
 		return -ENOMEM;
@@ -361,7 +361,7 @@ static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int i, ret;
 
-	if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
+	if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->buffer->size - 8)
 		return -EINVAL;
 
 	for (i = 0; i < dwords;) {
@@ -824,7 +824,7 @@ static int i915_irq_emit(struct drm_device *dev, void *data,
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		return -ENODEV;
 
-	if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
+	if (!dev_priv || !LP_RING(dev_priv)->buffer->virtual_start) {
 		DRM_ERROR("called with no initialization\n");
 		return -EINVAL;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 382eae74eebf..db7796beca50 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2523,7 +2523,7 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 	 * of tail of the request to update the last known position
 	 * of the GPU head.
 	 */
-	ring->last_retired_head = request->tail;
+	ring->buffer->last_retired_head = request->tail;
 
 	i915_gem_free_request(request);
 }
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 632db42b3f89..87ec60e181a7 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -825,8 +825,8 @@ static void i915_record_ring_state(struct drm_device *dev,
 		ering->hws = I915_READ(mmio);
 	}
 
-	ering->cpu_ring_head = ring->head;
-	ering->cpu_ring_tail = ring->tail;
+	ering->cpu_ring_head = ring->buffer->head;
+	ering->cpu_ring_tail = ring->buffer->tail;
 
 	ering->hangcheck_score = ring->hangcheck.score;
 	ering->hangcheck_action = ring->hangcheck.action;
@@ -930,7 +930,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
 		}
 
 		error->ring[i].ringbuffer =
-			i915_error_ggtt_object_create(dev_priv, ring->obj);
+			i915_error_ggtt_object_create(dev_priv, ring->buffer->obj);
 
 		if (ring->status_page.obj)
 			error->ring[i].hws_page =
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index c28e0dae38f7..152ba128405b 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1219,7 +1219,7 @@ static void ironlake_rps_change_irq_handler(struct drm_device *dev)
 static void notify_ring(struct drm_device *dev,
 			struct intel_engine_cs *ring)
 {
-	if (ring->obj == NULL)
+	if (ring->buffer->obj == NULL)
 		return;
 
 	trace_i915_gem_request_complete(ring);
@@ -2837,10 +2837,10 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
 		 * our ring is smaller than what the hardware (and hence
 		 * HEAD_ADDR) allows. Also handles wrap-around.
 		 */
-		head &= ring->size - 1;
+		head &= ring->buffer->size - 1;
 
 		/* This here seems to blow up */
-		cmd = ioread32(ring->virtual_start + head);
+		cmd = ioread32(ring->buffer->virtual_start + head);
 		if (cmd == ipehr)
 			break;
 
@@ -2850,7 +2850,7 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
 	if (!i)
 		return NULL;
 
-	*seqno = ioread32(ring->virtual_start + head + 4) + 1;
+	*seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
 	return semaphore_wait_to_signaller_ring(ring, ipehr);
 }
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 9b406e424a6f..70f1b8820fa4 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -50,7 +50,7 @@ static inline int __ring_space(int head, int tail, int size)
 
 static inline int ring_space(struct intel_engine_cs *ring)
 {
-	return __ring_space(ring->head & HEAD_ADDR, ring->tail, ring->size);
+	return __ring_space(ring->buffer->head & HEAD_ADDR, ring->buffer->tail, ring->buffer->size);
 }
 
 static bool intel_ring_stopped(struct intel_engine_cs *ring)
@@ -61,10 +61,10 @@ static bool intel_ring_stopped(struct intel_engine_cs *ring)
 
 void __intel_ring_advance(struct intel_engine_cs *ring)
 {
-	ring->tail &= ring->size - 1;
+	ring->buffer->tail &= ring->buffer->size - 1;
 	if (intel_ring_stopped(ring))
 		return;
-	ring->write_tail(ring, ring->tail);
+	ring->write_tail(ring, ring->buffer->tail);
 }
 
 static int
70static int 70static int
@@ -481,7 +481,7 @@ static int init_ring_common(struct intel_engine_cs *ring)
481{ 481{
482 struct drm_device *dev = ring->dev; 482 struct drm_device *dev = ring->dev;
483 struct drm_i915_private *dev_priv = dev->dev_private; 483 struct drm_i915_private *dev_priv = dev->dev_private;
484 struct drm_i915_gem_object *obj = ring->obj; 484 struct drm_i915_gem_object *obj = ring->buffer->obj;
485 int ret = 0; 485 int ret = 0;
486 486
487 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL); 487 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
@@ -520,7 +520,7 @@ static int init_ring_common(struct intel_engine_cs *ring)
 	 * register values. */
 	I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
 	I915_WRITE_CTL(ring,
-			((ring->size - PAGE_SIZE) & RING_NR_PAGES)
+			((ring->buffer->size - PAGE_SIZE) & RING_NR_PAGES)
 			| RING_VALID);
 
 	/* If the head is still not zero, the ring is dead */
@@ -540,10 +540,10 @@ static int init_ring_common(struct intel_engine_cs *ring)
 	if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
 		i915_kernel_lost_context(ring->dev);
 	else {
-		ring->head = I915_READ_HEAD(ring);
-		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
-		ring->space = ring_space(ring);
-		ring->last_retired_head = -1;
+		ring->buffer->head = I915_READ_HEAD(ring);
+		ring->buffer->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
+		ring->buffer->space = ring_space(ring);
+		ring->buffer->last_retired_head = -1;
 	}
 
 	memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
@@ -1382,14 +1382,14 @@ static int allocate_ring_buffer(struct intel_engine_cs *ring)
 	struct drm_i915_gem_object *obj;
 	int ret;
 
-	if (ring->obj)
+	if (ring->buffer->obj)
 		return 0;
 
 	obj = NULL;
 	if (!HAS_LLC(dev))
-		obj = i915_gem_object_create_stolen(dev, ring->size);
+		obj = i915_gem_object_create_stolen(dev, ring->buffer->size);
 	if (obj == NULL)
-		obj = i915_gem_alloc_object(dev, ring->size);
+		obj = i915_gem_alloc_object(dev, ring->buffer->size);
 	if (obj == NULL)
 		return -ENOMEM;
 
@@ -1401,15 +1401,15 @@ static int allocate_ring_buffer(struct intel_engine_cs *ring)
 	if (ret)
 		goto err_unpin;
 
-	ring->virtual_start =
+	ring->buffer->virtual_start =
 		ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
-			   ring->size);
-	if (ring->virtual_start == NULL) {
+			   ring->buffer->size);
+	if (ring->buffer->virtual_start == NULL) {
 		ret = -EINVAL;
 		goto err_unpin;
 	}
 
-	ring->obj = obj;
+	ring->buffer->obj = obj;
 	return 0;
 
 err_unpin:
@@ -1435,7 +1435,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 	ring->dev = dev;
 	INIT_LIST_HEAD(&ring->active_list);
 	INIT_LIST_HEAD(&ring->request_list);
-	ring->size = 32 * PAGE_SIZE;
+	ring->buffer->size = 32 * PAGE_SIZE;
 	memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));
 
 	init_waitqueue_head(&ring->irq_queue);
@@ -1461,9 +1461,9 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 	 * the TAIL pointer points to within the last 2 cachelines
 	 * of the buffer.
 	 */
-	ring->effective_size = ring->size;
+	ring->buffer->effective_size = ring->buffer->size;
 	if (IS_I830(dev) || IS_845G(dev))
-		ring->effective_size -= 2 * CACHELINE_BYTES;
+		ring->buffer->effective_size -= 2 * CACHELINE_BYTES;
 
 	ret = i915_cmd_parser_init_ring(ring);
 	if (ret)
@@ -1485,17 +1485,17 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
 {
 	struct drm_i915_private *dev_priv = to_i915(ring->dev);
 
-	if (ring->obj == NULL)
+	if (ring->buffer->obj == NULL)
 		return;
 
 	intel_stop_ring_buffer(ring);
 	WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
 
-	iounmap(ring->virtual_start);
+	iounmap(ring->buffer->virtual_start);
 
-	i915_gem_object_ggtt_unpin(ring->obj);
-	drm_gem_object_unreference(&ring->obj->base);
-	ring->obj = NULL;
+	i915_gem_object_ggtt_unpin(ring->buffer->obj);
+	drm_gem_object_unreference(&ring->buffer->obj->base);
+	ring->buffer->obj = NULL;
 	ring->preallocated_lazy_request = NULL;
 	ring->outstanding_lazy_seqno = 0;
 
@@ -1516,17 +1516,17 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
 	u32 seqno = 0;
 	int ret;
 
-	if (ring->last_retired_head != -1) {
-		ring->head = ring->last_retired_head;
-		ring->last_retired_head = -1;
+	if (ring->buffer->last_retired_head != -1) {
+		ring->buffer->head = ring->buffer->last_retired_head;
+		ring->buffer->last_retired_head = -1;
 
-		ring->space = ring_space(ring);
-		if (ring->space >= n)
+		ring->buffer->space = ring_space(ring);
+		if (ring->buffer->space >= n)
 			return 0;
 	}
 
 	list_for_each_entry(request, &ring->request_list, list) {
-		if (__ring_space(request->tail, ring->tail, ring->size) >= n) {
+		if (__ring_space(request->tail, ring->buffer->tail, ring->buffer->size) >= n) {
 			seqno = request->seqno;
 			break;
 		}
@@ -1540,10 +1540,10 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
 		return ret;
 
 	i915_gem_retire_requests_ring(ring);
-	ring->head = ring->last_retired_head;
-	ring->last_retired_head = -1;
+	ring->buffer->head = ring->buffer->last_retired_head;
+	ring->buffer->last_retired_head = -1;
 
-	ring->space = ring_space(ring);
+	ring->buffer->space = ring_space(ring);
 	return 0;
 }
 
@@ -1570,9 +1570,9 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
 
 	trace_i915_ring_wait_begin(ring);
 	do {
-		ring->head = I915_READ_HEAD(ring);
-		ring->space = ring_space(ring);
-		if (ring->space >= n) {
+		ring->buffer->head = I915_READ_HEAD(ring);
+		ring->buffer->space = ring_space(ring);
+		if (ring->buffer->space >= n) {
 			ret = 0;
 			break;
 		}
@@ -1608,21 +1608,21 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
 static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
 {
 	uint32_t __iomem *virt;
-	int rem = ring->size - ring->tail;
+	int rem = ring->buffer->size - ring->buffer->tail;
 
-	if (ring->space < rem) {
+	if (ring->buffer->space < rem) {
 		int ret = ring_wait_for_space(ring, rem);
 		if (ret)
 			return ret;
 	}
 
-	virt = ring->virtual_start + ring->tail;
+	virt = ring->buffer->virtual_start + ring->buffer->tail;
 	rem /= 4;
 	while (rem--)
 		iowrite32(MI_NOOP, virt++);
 
-	ring->tail = 0;
-	ring->space = ring_space(ring);
+	ring->buffer->tail = 0;
+	ring->buffer->space = ring_space(ring);
 
 	return 0;
 }
@@ -1674,13 +1674,13 @@ static int __intel_ring_prepare(struct intel_engine_cs *ring,
 {
 	int ret;
 
-	if (unlikely(ring->tail + bytes > ring->effective_size)) {
+	if (unlikely(ring->buffer->tail + bytes > ring->buffer->effective_size)) {
 		ret = intel_wrap_ring_buffer(ring);
 		if (unlikely(ret))
 			return ret;
 	}
 
-	if (unlikely(ring->space < bytes)) {
+	if (unlikely(ring->buffer->space < bytes)) {
 		ret = ring_wait_for_space(ring, bytes);
 		if (unlikely(ret))
 			return ret;
@@ -1709,14 +1709,14 @@ int intel_ring_begin(struct intel_engine_cs *ring,
 	if (ret)
 		return ret;
 
-	ring->space -= num_dwords * sizeof(uint32_t);
+	ring->buffer->space -= num_dwords * sizeof(uint32_t);
 	return 0;
 }
 
 /* Align the ring tail to a cacheline boundary */
 int intel_ring_cacheline_align(struct intel_engine_cs *ring)
 {
-	int num_dwords = (ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
+	int num_dwords = (ring->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
 	int ret;
 
 	if (num_dwords == 0)
@@ -2094,13 +2094,13 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
 	INIT_LIST_HEAD(&ring->active_list);
 	INIT_LIST_HEAD(&ring->request_list);
 
-	ring->size = size;
-	ring->effective_size = ring->size;
+	ring->buffer->size = size;
+	ring->buffer->effective_size = ring->buffer->size;
 	if (IS_I830(ring->dev) || IS_845G(ring->dev))
-		ring->effective_size -= 2 * CACHELINE_BYTES;
+		ring->buffer->effective_size -= 2 * CACHELINE_BYTES;
 
-	ring->virtual_start = ioremap_wc(start, size);
-	if (ring->virtual_start == NULL) {
+	ring->buffer->virtual_start = ioremap_wc(start, size);
+	if (ring->buffer->virtual_start == NULL) {
 		DRM_ERROR("can not ioremap virtual address for"
 			  " ring buffer\n");
 		ret = -ENOMEM;
@@ -2116,7 +2116,7 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
 	return 0;
 
 err_vstart:
-	iounmap(ring->virtual_start);
+	iounmap(ring->buffer->virtual_start);
 err_ringbuf:
 	kfree(ringbuf);
 	ring->buffer = NULL;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index a0ac668319d4..c26def080f21 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -91,28 +91,11 @@ struct intel_engine_cs {
 #define I915_NUM_RINGS 5
 #define LAST_USER_RING (VECS + 1)
 	u32 mmio_base;
-	void __iomem *virtual_start;
 	struct drm_device *dev;
-	struct drm_i915_gem_object *obj;
 	struct intel_ringbuffer *buffer;
 
-	u32 head;
-	u32 tail;
-	int space;
-	int size;
-	int effective_size;
 	struct intel_hw_status_page status_page;
 
-	/** We track the position of the requests in the ring buffer, and
-	 * when each is retired we increment last_retired_head as the GPU
-	 * must have finished processing the request and so we know we
-	 * can advance the ringbuffer up to that position.
-	 *
-	 * last_retired_head is set to -1 after the value is consumed so
-	 * we can detect new retirements.
-	 */
-	u32 last_retired_head;
-
 	unsigned irq_refcount; /* protected by dev_priv->irq_lock */
 	u32 irq_enable_mask;	/* bitmask to enable ring interrupt */
 	u32 trace_irq_seqno;
@@ -239,7 +222,7 @@ struct intel_engine_cs {
 static inline bool
 intel_ring_initialized(struct intel_engine_cs *ring)
 {
-	return ring->buffer && ring->obj;
+	return ring->buffer && ring->buffer->obj;
 }
 
 static inline unsigned
@@ -310,12 +293,12 @@ int __must_check intel_ring_cacheline_align(struct intel_engine_cs *ring);
 static inline void intel_ring_emit(struct intel_engine_cs *ring,
 				   u32 data)
 {
-	iowrite32(data, ring->virtual_start + ring->tail);
-	ring->tail += 4;
+	iowrite32(data, ring->buffer->virtual_start + ring->buffer->tail);
+	ring->buffer->tail += 4;
 }
 static inline void intel_ring_advance(struct intel_engine_cs *ring)
 {
-	ring->tail &= ring->size - 1;
+	ring->buffer->tail &= ring->buffer->size - 1;
 }
 void __intel_ring_advance(struct intel_engine_cs *ring);
 
@@ -335,7 +318,7 @@ void intel_ring_setup_status_page(struct intel_engine_cs *ring);
 
 static inline u32 intel_ring_get_tail(struct intel_engine_cs *ring)
 {
-	return ring->tail;
+	return ring->buffer->tail;
 }
 
 static inline u32 intel_ring_get_seqno(struct intel_engine_cs *ring)
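
Because only the storage moved, emit paths are unchanged from a caller's
point of view. A hedged sketch of the usual sequence built from the
helpers above (emit_two_noops and its two-NOOP payload are illustrative,
not from the patch):

	static int emit_two_noops(struct intel_engine_cs *ring)
	{
		/* Reserve two dwords; may wait until the ring has space. */
		int ret = intel_ring_begin(ring, 2);
		if (ret)
			return ret;

		/* Each emit writes at buffer->tail and advances it by 4 bytes. */
		intel_ring_emit(ring, MI_NOOP);
		intel_ring_emit(ring, MI_NOOP);

		/* Wrap the tail and publish it to the hardware tail register. */
		__intel_ring_advance(ring);
		return 0;
	}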