Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r--	drivers/gpu/drm/i915/intel_ringbuffer.c	101
1 file changed, 63 insertions(+), 38 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 16371a444426..4fb1ec95ec08 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -33,14 +33,24 @@
 #include "i915_trace.h"
 #include "intel_drv.h"
 
-/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
- * but keeps the logic simple. Indeed, the whole purpose of this macro is just
- * to give some inclination as to some of the magic values used in the various
- * workarounds!
- */
-#define CACHELINE_BYTES 64
+bool
+intel_ring_initialized(struct intel_engine_cs *ring)
+{
+	struct drm_device *dev = ring->dev;
+
+	if (!dev)
+		return false;
 
-static inline int __ring_space(int head, int tail, int size)
+	if (i915.enable_execlists) {
+		struct intel_context *dctx = ring->default_context;
+		struct intel_ringbuffer *ringbuf = dctx->engine[ring->id].ringbuf;
+
+		return ringbuf->obj;
+	} else
+		return ring->buffer && ring->buffer->obj;
+}
+
+int __intel_ring_space(int head, int tail, int size)
 {
 	int space = head - (tail + I915_RING_FREE_SPACE);
 	if (space < 0)
@@ -48,12 +58,13 @@ static inline int __ring_space(int head, int tail, int size)
 	return space;
 }
 
-static inline int ring_space(struct intel_ringbuffer *ringbuf)
+int intel_ring_space(struct intel_ringbuffer *ringbuf)
 {
-	return __ring_space(ringbuf->head & HEAD_ADDR, ringbuf->tail, ringbuf->size);
+	return __intel_ring_space(ringbuf->head & HEAD_ADDR,
+				  ringbuf->tail, ringbuf->size);
 }
 
-static bool intel_ring_stopped(struct intel_engine_cs *ring)
+bool intel_ring_stopped(struct intel_engine_cs *ring)
 {
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
 	return dev_priv->gpu_error.stop_rings & intel_ring_flag(ring);
@@ -476,9 +487,14 @@ static bool stop_ring(struct intel_engine_cs *ring)
 
 	if (!IS_GEN2(ring->dev)) {
 		I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
-		if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
-			DRM_ERROR("%s :timed out trying to stop ring\n", ring->name);
-			return false;
+		if (wait_for((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
+			DRM_ERROR("%s : timed out trying to stop ring\n", ring->name);
+			/* Sometimes we observe that the idle flag is not
+			 * set even though the ring is empty. So double
+			 * check before giving up.
+			 */
+			if (I915_READ_HEAD(ring) != I915_READ_TAIL(ring))
+				return false;
 		}
 	}
 
@@ -563,7 +579,7 @@ static int init_ring_common(struct intel_engine_cs *ring)
 	else {
 		ringbuf->head = I915_READ_HEAD(ring);
 		ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
-		ringbuf->space = ring_space(ringbuf);
+		ringbuf->space = intel_ring_space(ringbuf);
 		ringbuf->last_retired_head = -1;
 	}
 
@@ -575,8 +591,25 @@ out:
 	return ret;
 }
 
-static int
-init_pipe_control(struct intel_engine_cs *ring)
+void
+intel_fini_pipe_control(struct intel_engine_cs *ring)
+{
+	struct drm_device *dev = ring->dev;
+
+	if (ring->scratch.obj == NULL)
+		return;
+
+	if (INTEL_INFO(dev)->gen >= 5) {
+		kunmap(sg_page(ring->scratch.obj->pages->sgl));
+		i915_gem_object_ggtt_unpin(ring->scratch.obj);
+	}
+
+	drm_gem_object_unreference(&ring->scratch.obj->base);
+	ring->scratch.obj = NULL;
+}
+
+int
+intel_init_pipe_control(struct intel_engine_cs *ring)
 {
 	int ret;
 
@@ -651,7 +684,7 @@ static int init_render_ring(struct intel_engine_cs *ring)
 			   _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
 
 	if (INTEL_INFO(dev)->gen >= 5) {
-		ret = init_pipe_control(ring);
+		ret = intel_init_pipe_control(ring);
 		if (ret)
 			return ret;
 	}
@@ -686,16 +719,7 @@ static void render_ring_cleanup(struct intel_engine_cs *ring)
 		dev_priv->semaphore_obj = NULL;
 	}
 
-	if (ring->scratch.obj == NULL)
-		return;
-
-	if (INTEL_INFO(dev)->gen >= 5) {
-		kunmap(sg_page(ring->scratch.obj->pages->sgl));
-		i915_gem_object_ggtt_unpin(ring->scratch.obj);
-	}
-
-	drm_gem_object_unreference(&ring->scratch.obj->base);
-	ring->scratch.obj = NULL;
+	intel_fini_pipe_control(ring);
 }
 
 static int gen8_rcs_signal(struct intel_engine_cs *signaller,
@@ -1514,7 +1538,7 @@ static int init_phys_status_page(struct intel_engine_cs *ring)
 	return 0;
 }
 
-static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
+void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
 {
 	if (!ringbuf->obj)
 		return;
@@ -1525,8 +1549,8 @@ static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
 	ringbuf->obj = NULL;
 }
 
-static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
-				      struct intel_ringbuffer *ringbuf)
+int intel_alloc_ringbuffer_obj(struct drm_device *dev,
+			       struct intel_ringbuffer *ringbuf)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_i915_gem_object *obj;
@@ -1588,7 +1612,9 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 	ring->dev = dev;
 	INIT_LIST_HEAD(&ring->active_list);
 	INIT_LIST_HEAD(&ring->request_list);
+	INIT_LIST_HEAD(&ring->execlist_queue);
 	ringbuf->size = 32 * PAGE_SIZE;
+	ringbuf->ring = ring;
 	memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));
 
 	init_waitqueue_head(&ring->irq_queue);
@@ -1671,13 +1697,14 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
 		ringbuf->head = ringbuf->last_retired_head;
 		ringbuf->last_retired_head = -1;
 
-		ringbuf->space = ring_space(ringbuf);
+		ringbuf->space = intel_ring_space(ringbuf);
 		if (ringbuf->space >= n)
 			return 0;
 	}
 
 	list_for_each_entry(request, &ring->request_list, list) {
-		if (__ring_space(request->tail, ringbuf->tail, ringbuf->size) >= n) {
+		if (__intel_ring_space(request->tail, ringbuf->tail,
+				       ringbuf->size) >= n) {
 			seqno = request->seqno;
 			break;
 		}
@@ -1694,7 +1721,7 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
 	ringbuf->head = ringbuf->last_retired_head;
 	ringbuf->last_retired_head = -1;
 
-	ringbuf->space = ring_space(ringbuf);
+	ringbuf->space = intel_ring_space(ringbuf);
 	return 0;
 }
 
@@ -1723,7 +1750,7 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
 	trace_i915_ring_wait_begin(ring);
 	do {
 		ringbuf->head = I915_READ_HEAD(ring);
-		ringbuf->space = ring_space(ringbuf);
+		ringbuf->space = intel_ring_space(ringbuf);
 		if (ringbuf->space >= n) {
 			ret = 0;
 			break;
@@ -1775,7 +1802,7 @@ static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
 		iowrite32(MI_NOOP, virt++);
 
 	ringbuf->tail = 0;
-	ringbuf->space = ring_space(ringbuf);
+	ringbuf->space = intel_ring_space(ringbuf);
 
 	return 0;
 }
@@ -1980,9 +2007,7 @@ gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring,
 			      u64 offset, u32 len,
 			      unsigned flags)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
-	bool ppgtt = dev_priv->mm.aliasing_ppgtt != NULL &&
-		!(flags & I915_DISPATCH_SECURE);
+	bool ppgtt = USES_PPGTT(ring->dev) && !(flags & I915_DISPATCH_SECURE);
 	int ret;
 
 	ret = intel_ring_begin(ring, 4);