author		Oscar Mateo <oscar.mateo@intel.com>		2014-05-22 09:13:35 -0400
committer	Daniel Vetter <daniel.vetter@ffwll.ch>		2014-05-22 17:27:25 -0400
commit		ee1b1e5ef38d22e2447b48b8456a2b2bcf438e65 (patch)
tree		0cdef19ebed891d07b9b23040f461d4bda9d1c1a /drivers/gpu/drm/i915/intel_ringbuffer.c
parent		8ee149756e4fbaf4462cf3f7377456ec5fff8b63 (diff)
drm/i915: Split the ringbuffers from the rings (2/3)
This refactoring has been performed using the following Coccinelle
semantic script:
@@
struct intel_engine_cs r;
@@
(
- (r).obj
+ r.buffer->obj
|
- (r).virtual_start
+ r.buffer->virtual_start
|
- (r).head
+ r.buffer->head
|
- (r).tail
+ r.buffer->tail
|
- (r).space
+ r.buffer->space
|
- (r).size
+ r.buffer->size
|
- (r).effective_size
+ r.buffer->effective_size
|
- (r).last_retired_head
+ r.buffer->last_retired_head
)
@@
struct intel_engine_cs *r;
@@
(
- (r)->obj
+ r->buffer->obj
|
- (r)->virtual_start
+ r->buffer->virtual_start
|
- (r)->head
+ r->buffer->head
|
- (r)->tail
+ r->buffer->tail
|
- (r)->space
+ r->buffer->space
|
- (r)->size
+ r->buffer->size
|
- (r)->effective_size
+ r->buffer->effective_size
|
- (r)->last_retired_head
+ r->buffer->last_retired_head
)
@@
expression E;
@@
(
- LP_RING(E)->obj
+ LP_RING(E)->buffer->obj
|
- LP_RING(E)->virtual_start
+ LP_RING(E)->buffer->virtual_start
|
- LP_RING(E)->head
+ LP_RING(E)->buffer->head
|
- LP_RING(E)->tail
+ LP_RING(E)->buffer->tail
|
- LP_RING(E)->space
+ LP_RING(E)->buffer->space
|
- LP_RING(E)->size
+ LP_RING(E)->buffer->size
|
- LP_RING(E)->effective_size
+ LP_RING(E)->buffer->effective_size
|
- LP_RING(E)->last_retired_head
+ LP_RING(E)->buffer->last_retired_head
)
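A semantic patch like the one above is normally applied tree-wide with
Coccinelle's spatch tool. A minimal invocation, assuming the script is saved
as ring.cocci (an illustrative name, not one from this series), would be:

    spatch --sp-file ring.cocci --in-place --dir drivers/gpu/drm/i915

(--sp-file, --in-place and --dir are standard spatch options; the exact flag
spelling can vary between Coccinelle releases.)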
Note: On top of this, this patch also removes the now-unused ringbuffer
fields in intel_engine_cs.
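For context, those fields moved into the struct intel_ringbuffer introduced
in part 1/3 of this series. A simplified sketch of the resulting split
(abbreviated, not the verbatim header; see intel_ringbuffer.h for the real
definitions):

struct intel_ringbuffer {
	struct drm_i915_gem_object *obj;	/* backing GEM object */
	void __iomem *virtual_start;		/* CPU mapping of the ring */
	u32 head;
	u32 tail;
	int space;				/* bytes free in the ring */
	int size;
	int effective_size;			/* size minus wrap-around reserve */
	u32 last_retired_head;
};

struct intel_engine_cs {
	/* ... engine state (irqs, semaphores, hangcheck, ...) ... */
	struct intel_ringbuffer *buffer;	/* replaces the fields above */
};

Engine code now reaches ring state through ring->buffer->..., as the diff
below shows.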
Signed-off-by: Oscar Mateo <oscar.mateo@intel.com>
[danvet: Add note about fixup patch included here.]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r--	drivers/gpu/drm/i915/intel_ringbuffer.c | 102
1 file changed, 51 insertions(+), 51 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 9b406e424a6f..70f1b8820fa4 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -50,7 +50,7 @@ static inline int __ring_space(int head, int tail, int size)
 
 static inline int ring_space(struct intel_engine_cs *ring)
 {
-	return __ring_space(ring->head & HEAD_ADDR, ring->tail, ring->size);
+	return __ring_space(ring->buffer->head & HEAD_ADDR, ring->buffer->tail, ring->buffer->size);
 }
 
 static bool intel_ring_stopped(struct intel_engine_cs *ring)
@@ -61,10 +61,10 @@ static bool intel_ring_stopped(struct intel_engine_cs *ring)
 
 void __intel_ring_advance(struct intel_engine_cs *ring)
 {
-	ring->tail &= ring->size - 1;
+	ring->buffer->tail &= ring->buffer->size - 1;
 	if (intel_ring_stopped(ring))
 		return;
-	ring->write_tail(ring, ring->tail);
+	ring->write_tail(ring, ring->buffer->tail);
 }
 
 static int
@@ -481,7 +481,7 @@ static int init_ring_common(struct intel_engine_cs *ring)
 {
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj = ring->obj;
+	struct drm_i915_gem_object *obj = ring->buffer->obj;
 	int ret = 0;
 
 	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
@@ -520,7 +520,7 @@ static int init_ring_common(struct intel_engine_cs *ring)
 	 * register values. */
 	I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
 	I915_WRITE_CTL(ring,
-			((ring->size - PAGE_SIZE) & RING_NR_PAGES)
+			((ring->buffer->size - PAGE_SIZE) & RING_NR_PAGES)
 			| RING_VALID);
 
 	/* If the head is still not zero, the ring is dead */
@@ -540,10 +540,10 @@ static int init_ring_common(struct intel_engine_cs *ring)
 	if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
 		i915_kernel_lost_context(ring->dev);
 	else {
-		ring->head = I915_READ_HEAD(ring);
-		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
-		ring->space = ring_space(ring);
-		ring->last_retired_head = -1;
+		ring->buffer->head = I915_READ_HEAD(ring);
+		ring->buffer->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
+		ring->buffer->space = ring_space(ring);
+		ring->buffer->last_retired_head = -1;
 	}
 
 	memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
@@ -1382,14 +1382,14 @@ static int allocate_ring_buffer(struct intel_engine_cs *ring)
 	struct drm_i915_gem_object *obj;
 	int ret;
 
-	if (ring->obj)
+	if (ring->buffer->obj)
 		return 0;
 
 	obj = NULL;
 	if (!HAS_LLC(dev))
-		obj = i915_gem_object_create_stolen(dev, ring->size);
+		obj = i915_gem_object_create_stolen(dev, ring->buffer->size);
 	if (obj == NULL)
-		obj = i915_gem_alloc_object(dev, ring->size);
+		obj = i915_gem_alloc_object(dev, ring->buffer->size);
 	if (obj == NULL)
 		return -ENOMEM;
 
@@ -1401,15 +1401,15 @@ static int allocate_ring_buffer(struct intel_engine_cs *ring)
 	if (ret)
 		goto err_unpin;
 
-	ring->virtual_start =
+	ring->buffer->virtual_start =
 		ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
-			   ring->size);
-	if (ring->virtual_start == NULL) {
+			   ring->buffer->size);
+	if (ring->buffer->virtual_start == NULL) {
 		ret = -EINVAL;
 		goto err_unpin;
 	}
 
-	ring->obj = obj;
+	ring->buffer->obj = obj;
 	return 0;
 
 err_unpin:
@@ -1435,7 +1435,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 	ring->dev = dev;
 	INIT_LIST_HEAD(&ring->active_list);
 	INIT_LIST_HEAD(&ring->request_list);
-	ring->size = 32 * PAGE_SIZE;
+	ring->buffer->size = 32 * PAGE_SIZE;
 	memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));
 
 	init_waitqueue_head(&ring->irq_queue);
@@ -1461,9 +1461,9 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 	 * the TAIL pointer points to within the last 2 cachelines
	 * of the buffer.
 	 */
-	ring->effective_size = ring->size;
+	ring->buffer->effective_size = ring->buffer->size;
 	if (IS_I830(dev) || IS_845G(dev))
-		ring->effective_size -= 2 * CACHELINE_BYTES;
+		ring->buffer->effective_size -= 2 * CACHELINE_BYTES;
 
 	ret = i915_cmd_parser_init_ring(ring);
 	if (ret)
@@ -1485,17 +1485,17 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
 {
 	struct drm_i915_private *dev_priv = to_i915(ring->dev);
 
-	if (ring->obj == NULL)
+	if (ring->buffer->obj == NULL)
 		return;
 
 	intel_stop_ring_buffer(ring);
 	WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
 
-	iounmap(ring->virtual_start);
+	iounmap(ring->buffer->virtual_start);
 
-	i915_gem_object_ggtt_unpin(ring->obj);
-	drm_gem_object_unreference(&ring->obj->base);
-	ring->obj = NULL;
+	i915_gem_object_ggtt_unpin(ring->buffer->obj);
+	drm_gem_object_unreference(&ring->buffer->obj->base);
+	ring->buffer->obj = NULL;
 	ring->preallocated_lazy_request = NULL;
 	ring->outstanding_lazy_seqno = 0;
 
@@ -1516,17 +1516,17 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
 	u32 seqno = 0;
 	int ret;
 
-	if (ring->last_retired_head != -1) {
-		ring->head = ring->last_retired_head;
-		ring->last_retired_head = -1;
+	if (ring->buffer->last_retired_head != -1) {
+		ring->buffer->head = ring->buffer->last_retired_head;
+		ring->buffer->last_retired_head = -1;
 
-		ring->space = ring_space(ring);
-		if (ring->space >= n)
+		ring->buffer->space = ring_space(ring);
+		if (ring->buffer->space >= n)
 			return 0;
 	}
 
 	list_for_each_entry(request, &ring->request_list, list) {
-		if (__ring_space(request->tail, ring->tail, ring->size) >= n) {
+		if (__ring_space(request->tail, ring->buffer->tail, ring->buffer->size) >= n) {
 			seqno = request->seqno;
 			break;
 		}
@@ -1540,10 +1540,10 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
 		return ret;
 
 	i915_gem_retire_requests_ring(ring);
-	ring->head = ring->last_retired_head;
-	ring->last_retired_head = -1;
+	ring->buffer->head = ring->buffer->last_retired_head;
+	ring->buffer->last_retired_head = -1;
 
-	ring->space = ring_space(ring);
+	ring->buffer->space = ring_space(ring);
 	return 0;
 }
 
@@ -1570,9 +1570,9 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
 
 	trace_i915_ring_wait_begin(ring);
 	do {
-		ring->head = I915_READ_HEAD(ring);
-		ring->space = ring_space(ring);
-		if (ring->space >= n) {
+		ring->buffer->head = I915_READ_HEAD(ring);
+		ring->buffer->space = ring_space(ring);
+		if (ring->buffer->space >= n) {
 			ret = 0;
 			break;
 		}
@@ -1608,21 +1608,21 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
 static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
 {
 	uint32_t __iomem *virt;
-	int rem = ring->size - ring->tail;
+	int rem = ring->buffer->size - ring->buffer->tail;
 
-	if (ring->space < rem) {
+	if (ring->buffer->space < rem) {
 		int ret = ring_wait_for_space(ring, rem);
 		if (ret)
 			return ret;
 	}
 
-	virt = ring->virtual_start + ring->tail;
+	virt = ring->buffer->virtual_start + ring->buffer->tail;
 	rem /= 4;
 	while (rem--)
 		iowrite32(MI_NOOP, virt++);
 
-	ring->tail = 0;
-	ring->space = ring_space(ring);
+	ring->buffer->tail = 0;
+	ring->buffer->space = ring_space(ring);
 
 	return 0;
 }
@@ -1674,13 +1674,13 @@ static int __intel_ring_prepare(struct intel_engine_cs *ring,
 {
 	int ret;
 
-	if (unlikely(ring->tail + bytes > ring->effective_size)) {
+	if (unlikely(ring->buffer->tail + bytes > ring->buffer->effective_size)) {
 		ret = intel_wrap_ring_buffer(ring);
 		if (unlikely(ret))
 			return ret;
 	}
 
-	if (unlikely(ring->space < bytes)) {
+	if (unlikely(ring->buffer->space < bytes)) {
 		ret = ring_wait_for_space(ring, bytes);
 		if (unlikely(ret))
 			return ret;
@@ -1709,14 +1709,14 @@ int intel_ring_begin(struct intel_engine_cs *ring,
 	if (ret)
 		return ret;
 
-	ring->space -= num_dwords * sizeof(uint32_t);
+	ring->buffer->space -= num_dwords * sizeof(uint32_t);
 	return 0;
 }
 
 /* Align the ring tail to a cacheline boundary */
 int intel_ring_cacheline_align(struct intel_engine_cs *ring)
 {
-	int num_dwords = (ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
+	int num_dwords = (ring->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
 	int ret;
 
 	if (num_dwords == 0)
@@ -2094,13 +2094,13 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
 	INIT_LIST_HEAD(&ring->active_list);
 	INIT_LIST_HEAD(&ring->request_list);
 
-	ring->size = size;
-	ring->effective_size = ring->size;
+	ring->buffer->size = size;
+	ring->buffer->effective_size = ring->buffer->size;
 	if (IS_I830(ring->dev) || IS_845G(ring->dev))
-		ring->effective_size -= 2 * CACHELINE_BYTES;
+		ring->buffer->effective_size -= 2 * CACHELINE_BYTES;
 
-	ring->virtual_start = ioremap_wc(start, size);
-	if (ring->virtual_start == NULL) {
+	ring->buffer->virtual_start = ioremap_wc(start, size);
+	if (ring->buffer->virtual_start == NULL) {
 		DRM_ERROR("can not ioremap virtual address for"
 			  " ring buffer\n");
 		ret = -ENOMEM;
@@ -2116,7 +2116,7 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
 	return 0;
 
 err_vstart:
-	iounmap(ring->virtual_start);
+	iounmap(ring->buffer->virtual_start);
 err_ringbuf:
 	kfree(ringbuf);
 	ring->buffer = NULL;