author		Oscar Mateo <oscar.mateo@intel.com>	2014-05-22 09:13:36 -0400
committer	Daniel Vetter <daniel.vetter@ffwll.ch>	2014-05-22 17:30:18 -0400
commit		93b0a4e0b26e80d8ddf476024e834e675850df81 (patch)
tree		d548d6ad86885f230ae3cab68f9050d9c5fd0442 /drivers
parent		ee1b1e5ef38d22e2447b48b8456a2b2bcf438e65 (diff)
drm/i915: Split the ringbuffers from the rings (3/3)
Manual cleanup after the previous Coccinelle script.
Yes, I could write another Coccinelle script to do this, but I
don't want labor-replacing robots making an honest programmer's
work obsolete (also, I'm lazy).
Signed-off-by: Oscar Mateo <oscar.mateo@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Diffstat (limited to 'drivers')
 drivers/gpu/drm/i915/i915_dma.c         |  13
 drivers/gpu/drm/i915/i915_irq.c         |   2
 drivers/gpu/drm/i915/intel_ringbuffer.c | 109
 drivers/gpu/drm/i915/intel_ringbuffer.h |   8
 4 files changed, 72 insertions(+), 60 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 234d454923b5..4e70de6ed468 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -141,6 +141,7 @@ void i915_kernel_lost_context(struct drm_device * dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv;
 	struct intel_engine_cs *ring = LP_RING(dev_priv);
+	struct intel_ringbuffer *ringbuf = ring->buffer;
 
 	/*
 	 * We should never lose context on the ring with modesetting
@@ -149,17 +150,17 @@ void i915_kernel_lost_context(struct drm_device * dev)
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		return;
 
-	ring->buffer->head = I915_READ_HEAD(ring) & HEAD_ADDR;
-	ring->buffer->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
-	ring->buffer->space = ring->buffer->head - (ring->buffer->tail + I915_RING_FREE_SPACE);
-	if (ring->buffer->space < 0)
-		ring->buffer->space += ring->buffer->size;
+	ringbuf->head = I915_READ_HEAD(ring) & HEAD_ADDR;
+	ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
+	ringbuf->space = ringbuf->head - (ringbuf->tail + I915_RING_FREE_SPACE);
+	if (ringbuf->space < 0)
+		ringbuf->space += ringbuf->size;
 
 	if (!dev->primary->master)
 		return;
 
 	master_priv = dev->primary->master->driver_priv;
-	if (ring->buffer->head == ring->buffer->tail && master_priv->sarea_priv)
+	if (ringbuf->head == ringbuf->tail && master_priv->sarea_priv)
 		master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
 }
 
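Note: the free-space computation open-coded in the hunk above is the same arithmetic as the __ring_space() helper in intel_ringbuffer.c, sketched below roughly as it appears around this kernel version (I915_RING_FREE_SPACE is the slack the driver always keeps between tail and head):

static inline int __ring_space(int head, int tail, int size)
{
	/* free bytes between tail and head, minus the mandatory gap */
	int space = head - (tail + I915_RING_FREE_SPACE);
	if (space < 0)
		space += size;	/* tail has wrapped past head */
	return space;
}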
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 152ba128405b..28bae6e4a424 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1219,7 +1219,7 @@ static void ironlake_rps_change_irq_handler(struct drm_device *dev)
 static void notify_ring(struct drm_device *dev,
 			struct intel_engine_cs *ring)
 {
-	if (ring->buffer->obj == NULL)
+	if (!intel_ring_initialized(ring))
 		return;
 
 	trace_i915_gem_request_complete(ring);
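Note: this hunk (and similar ones below) replaces open-coded ring->buffer->obj == NULL checks with intel_ring_initialized(). A plausible definition of that helper, introduced earlier in this series in intel_ringbuffer.h, is sketched here; the exact body may differ:

static inline bool intel_ring_initialized(struct intel_engine_cs *ring)
{
	/* a ring is usable once its ringbuffer and backing object exist */
	return ring->buffer && ring->buffer->obj;
}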
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 70f1b8820fa4..3379722d0e6d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -50,7 +50,8 @@ static inline int __ring_space(int head, int tail, int size)
 
 static inline int ring_space(struct intel_engine_cs *ring)
 {
-	return __ring_space(ring->buffer->head & HEAD_ADDR, ring->buffer->tail, ring->buffer->size);
+	struct intel_ringbuffer *ringbuf = ring->buffer;
+	return __ring_space(ringbuf->head & HEAD_ADDR, ringbuf->tail, ringbuf->size);
 }
 
 static bool intel_ring_stopped(struct intel_engine_cs *ring)
@@ -61,10 +62,11 @@ static bool intel_ring_stopped(struct intel_engine_cs *ring)
 
 void __intel_ring_advance(struct intel_engine_cs *ring)
 {
-	ring->buffer->tail &= ring->buffer->size - 1;
+	struct intel_ringbuffer *ringbuf = ring->buffer;
+	ringbuf->tail &= ringbuf->size - 1;
 	if (intel_ring_stopped(ring))
 		return;
-	ring->write_tail(ring, ring->buffer->tail);
+	ring->write_tail(ring, ringbuf->tail);
 }
 
 static int
@@ -481,7 +483,8 @@ static int init_ring_common(struct intel_engine_cs *ring)
 {
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj = ring->buffer->obj;
+	struct intel_ringbuffer *ringbuf = ring->buffer;
+	struct drm_i915_gem_object *obj = ringbuf->obj;
 	int ret = 0;
 
 	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
@@ -520,7 +523,7 @@ static int init_ring_common(struct intel_engine_cs *ring)
 	 * register values. */
 	I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
 	I915_WRITE_CTL(ring,
-			((ring->buffer->size - PAGE_SIZE) & RING_NR_PAGES)
+			((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES)
 			| RING_VALID);
 
 	/* If the head is still not zero, the ring is dead */
@@ -540,10 +543,10 @@ static int init_ring_common(struct intel_engine_cs *ring)
 	if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
 		i915_kernel_lost_context(ring->dev);
 	else {
-		ring->buffer->head = I915_READ_HEAD(ring);
-		ring->buffer->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
-		ring->buffer->space = ring_space(ring);
-		ring->buffer->last_retired_head = -1;
+		ringbuf->head = I915_READ_HEAD(ring);
+		ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
+		ringbuf->space = ring_space(ring);
+		ringbuf->last_retired_head = -1;
 	}
 
 	memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
@@ -1379,17 +1382,18 @@ static int allocate_ring_buffer(struct intel_engine_cs *ring)
 {
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct intel_ringbuffer *ringbuf = ring->buffer;
 	struct drm_i915_gem_object *obj;
 	int ret;
 
-	if (ring->buffer->obj)
+	if (intel_ring_initialized(ring))
 		return 0;
 
 	obj = NULL;
 	if (!HAS_LLC(dev))
-		obj = i915_gem_object_create_stolen(dev, ring->buffer->size);
+		obj = i915_gem_object_create_stolen(dev, ringbuf->size);
 	if (obj == NULL)
-		obj = i915_gem_alloc_object(dev, ring->buffer->size);
+		obj = i915_gem_alloc_object(dev, ringbuf->size);
 	if (obj == NULL)
 		return -ENOMEM;
 
@@ -1401,15 +1405,15 @@ static int allocate_ring_buffer(struct intel_engine_cs *ring)
 	if (ret)
 		goto err_unpin;
 
-	ring->buffer->virtual_start =
+	ringbuf->virtual_start =
 		ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
-			   ring->buffer->size);
-	if (ring->buffer->virtual_start == NULL) {
+			   ringbuf->size);
+	if (ringbuf->virtual_start == NULL) {
 		ret = -EINVAL;
 		goto err_unpin;
 	}
 
-	ring->buffer->obj = obj;
+	ringbuf->obj = obj;
 	return 0;
 
 err_unpin:
@@ -1435,7 +1439,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 	ring->dev = dev;
 	INIT_LIST_HEAD(&ring->active_list);
 	INIT_LIST_HEAD(&ring->request_list);
-	ring->buffer->size = 32 * PAGE_SIZE;
+	ringbuf->size = 32 * PAGE_SIZE;
 	memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));
 
 	init_waitqueue_head(&ring->irq_queue);
@@ -1461,9 +1465,9 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 	 * the TAIL pointer points to within the last 2 cachelines
 	 * of the buffer.
 	 */
-	ring->buffer->effective_size = ring->buffer->size;
+	ringbuf->effective_size = ringbuf->size;
 	if (IS_I830(dev) || IS_845G(dev))
-		ring->buffer->effective_size -= 2 * CACHELINE_BYTES;
+		ringbuf->effective_size -= 2 * CACHELINE_BYTES;
 
 	ret = i915_cmd_parser_init_ring(ring);
 	if (ret)
@@ -1484,18 +1488,19 @@ error:
 void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
 {
 	struct drm_i915_private *dev_priv = to_i915(ring->dev);
+	struct intel_ringbuffer *ringbuf = ring->buffer;
 
-	if (ring->buffer->obj == NULL)
+	if (!intel_ring_initialized(ring))
 		return;
 
 	intel_stop_ring_buffer(ring);
 	WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
 
-	iounmap(ring->buffer->virtual_start);
+	iounmap(ringbuf->virtual_start);
 
-	i915_gem_object_ggtt_unpin(ring->buffer->obj);
-	drm_gem_object_unreference(&ring->buffer->obj->base);
-	ring->buffer->obj = NULL;
+	i915_gem_object_ggtt_unpin(ringbuf->obj);
+	drm_gem_object_unreference(&ringbuf->obj->base);
+	ringbuf->obj = NULL;
 	ring->preallocated_lazy_request = NULL;
 	ring->outstanding_lazy_seqno = 0;
 
@@ -1506,27 +1511,28 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
 
 	i915_cmd_parser_fini_ring(ring);
 
-	kfree(ring->buffer);
+	kfree(ringbuf);
 	ring->buffer = NULL;
 }
 
 static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
 {
+	struct intel_ringbuffer *ringbuf = ring->buffer;
 	struct drm_i915_gem_request *request;
 	u32 seqno = 0;
 	int ret;
 
-	if (ring->buffer->last_retired_head != -1) {
-		ring->buffer->head = ring->buffer->last_retired_head;
-		ring->buffer->last_retired_head = -1;
+	if (ringbuf->last_retired_head != -1) {
+		ringbuf->head = ringbuf->last_retired_head;
+		ringbuf->last_retired_head = -1;
 
-		ring->buffer->space = ring_space(ring);
-		if (ring->buffer->space >= n)
+		ringbuf->space = ring_space(ring);
+		if (ringbuf->space >= n)
 			return 0;
 	}
 
 	list_for_each_entry(request, &ring->request_list, list) {
-		if (__ring_space(request->tail, ring->buffer->tail, ring->buffer->size) >= n) {
+		if (__ring_space(request->tail, ringbuf->tail, ringbuf->size) >= n) {
 			seqno = request->seqno;
 			break;
 		}
@@ -1540,10 +1546,10 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
 		return ret;
 
 	i915_gem_retire_requests_ring(ring);
-	ring->buffer->head = ring->buffer->last_retired_head;
-	ring->buffer->last_retired_head = -1;
+	ringbuf->head = ringbuf->last_retired_head;
+	ringbuf->last_retired_head = -1;
 
-	ring->buffer->space = ring_space(ring);
+	ringbuf->space = ring_space(ring);
 	return 0;
 }
 
@@ -1551,6 +1557,7 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
 {
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_ringbuffer *ringbuf = ring->buffer;
 	unsigned long end;
 	int ret;
 
@@ -1570,9 +1577,9 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
 
 	trace_i915_ring_wait_begin(ring);
 	do {
-		ring->buffer->head = I915_READ_HEAD(ring);
-		ring->buffer->space = ring_space(ring);
-		if (ring->buffer->space >= n) {
+		ringbuf->head = I915_READ_HEAD(ring);
+		ringbuf->space = ring_space(ring);
+		if (ringbuf->space >= n) {
 			ret = 0;
 			break;
 		}
@@ -1608,21 +1615,22 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
 static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
 {
 	uint32_t __iomem *virt;
-	int rem = ring->buffer->size - ring->buffer->tail;
+	struct intel_ringbuffer *ringbuf = ring->buffer;
+	int rem = ringbuf->size - ringbuf->tail;
 
-	if (ring->buffer->space < rem) {
+	if (ringbuf->space < rem) {
 		int ret = ring_wait_for_space(ring, rem);
 		if (ret)
 			return ret;
 	}
 
-	virt = ring->buffer->virtual_start + ring->buffer->tail;
+	virt = ringbuf->virtual_start + ringbuf->tail;
 	rem /= 4;
 	while (rem--)
 		iowrite32(MI_NOOP, virt++);
 
-	ring->buffer->tail = 0;
-	ring->buffer->space = ring_space(ring);
+	ringbuf->tail = 0;
+	ringbuf->space = ring_space(ring);
 
 	return 0;
 }
@@ -1672,15 +1680,16 @@ intel_ring_alloc_seqno(struct intel_engine_cs *ring)
 static int __intel_ring_prepare(struct intel_engine_cs *ring,
 				int bytes)
 {
+	struct intel_ringbuffer *ringbuf = ring->buffer;
 	int ret;
 
-	if (unlikely(ring->buffer->tail + bytes > ring->buffer->effective_size)) {
+	if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
 		ret = intel_wrap_ring_buffer(ring);
 		if (unlikely(ret))
 			return ret;
 	}
 
-	if (unlikely(ring->buffer->space < bytes)) {
+	if (unlikely(ringbuf->space < bytes)) {
 		ret = ring_wait_for_space(ring, bytes);
 		if (unlikely(ret))
 			return ret;
@@ -2094,13 +2103,13 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
 	INIT_LIST_HEAD(&ring->active_list);
 	INIT_LIST_HEAD(&ring->request_list);
 
-	ring->buffer->size = size;
-	ring->buffer->effective_size = ring->buffer->size;
+	ringbuf->size = size;
+	ringbuf->effective_size = ringbuf->size;
 	if (IS_I830(ring->dev) || IS_845G(ring->dev))
-		ring->buffer->effective_size -= 2 * CACHELINE_BYTES;
+		ringbuf->effective_size -= 2 * CACHELINE_BYTES;
 
-	ring->buffer->virtual_start = ioremap_wc(start, size);
-	if (ring->buffer->virtual_start == NULL) {
+	ringbuf->virtual_start = ioremap_wc(start, size);
+	if (ringbuf->virtual_start == NULL) {
 		DRM_ERROR("can not ioremap virtual address for"
 			  " ring buffer\n");
 		ret = -ENOMEM;
@@ -2116,7 +2125,7 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
 	return 0;
 
 err_vstart:
-	iounmap(ring->buffer->virtual_start);
+	iounmap(ringbuf->virtual_start);
 err_ringbuf:
 	kfree(ringbuf);
 	ring->buffer = NULL;
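For reference, the intel_ringbuffer structure that these hunks consistently cache in a local ringbuf pointer was introduced in part 1/3 of this series. The sketch below is reconstructed purely from the fields referenced in this patch; field types, ordering, and any unreferenced members are guesses:

struct intel_ringbuffer {
	struct drm_i915_gem_object *obj;	/* backing GEM object */
	void __iomem *virtual_start;		/* CPU mapping of the ring */
	u32 head;				/* read pointer (bytes) */
	u32 tail;				/* write pointer (bytes) */
	int space;				/* cached free space */
	int size;				/* total size in bytes */
	int effective_size;			/* size minus hw workarounds */
	u32 last_retired_head;			/* head after last retire, or -1 */
};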
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index c26def080f21..5c509e74c722 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -293,12 +293,14 @@ int __must_check intel_ring_cacheline_align(struct intel_engine_cs *ring);
 static inline void intel_ring_emit(struct intel_engine_cs *ring,
 				   u32 data)
 {
-	iowrite32(data, ring->buffer->virtual_start + ring->buffer->tail);
-	ring->buffer->tail += 4;
+	struct intel_ringbuffer *ringbuf = ring->buffer;
+	iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
+	ringbuf->tail += 4;
 }
 static inline void intel_ring_advance(struct intel_engine_cs *ring)
 {
-	ring->buffer->tail &= ring->buffer->size - 1;
+	struct intel_ringbuffer *ringbuf = ring->buffer;
+	ringbuf->tail &= ringbuf->size - 1;
 }
 void __intel_ring_advance(struct intel_engine_cs *ring);
 
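The emit helpers above are used in the driver's command-emission paths roughly as follows (a hypothetical caller for illustration, not part of this patch; it assumes the intel_ring_begin() declared in this header around this kernel version):

static int emit_two_noops(struct intel_engine_cs *ring)
{
	int ret;

	/* reserve space for two dwords in the ringbuffer */
	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_NOOP);	/* write a dword, bump ringbuf->tail */
	intel_ring_emit(ring, MI_NOOP);
	__intel_ring_advance(ring);	/* wrap tail and hand it to the hardware */

	return 0;
}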