author     Chris Wilson <chris@chris-wilson.co.uk>    2015-09-03 08:01:39 -0400
committer  Daniel Vetter <daniel.vetter@ffwll.ch>     2015-09-04 04:17:00 -0400
commit     01101fa7cc85fffc95798d1f67970dad96063fc6 (patch)
tree       0e6b741c4ba5bd4b78dba8c4746d20627d9ff417 /drivers/gpu/drm/i915/intel_ringbuffer.c
parent     742f491d2c204204086d2bc85cc5100daa6ff336 (diff)
drm/i915: Refactor common ringbuffer allocation code
A small, very small, step to sharing the duplicate code between
execlists and legacy submission engines, starting with the ringbuffer
allocation code.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Arun Siluvery <arun.siluvery@linux.intel.com>
Cc: Mika Kuoppala <mika.kuoppala@intel.com>
Cc: Dave Gordon <david.s.gordon@intel.com>
Reviewed-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
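
The refactor pairs a constructor, intel_engine_create_ringbuffer(), with a destructor, intel_ringbuffer_free(), so an engine-init path no longer open-codes the kzalloc, the size/effective_size setup (including the i830/845G TAIL erratum adjustment) and the backing-object allocation. Below is a minimal sketch of the intended caller pattern, condensed from the diff that follows; the wrapper function names and the 32-page ring size are illustrative placeholders, not part of the patch.

/*
 * Illustrative sketch only, condensed from the diff below. Only
 * intel_engine_create_ringbuffer() and intel_ringbuffer_free() come from
 * the patch; the wrapper functions and the 32-page size are placeholders.
 */
static int example_engine_init_ring(struct intel_engine_cs *engine)
{
	struct intel_ringbuffer *ringbuf;

	/* One call now covers the kzalloc, the size/effective_size setup
	 * (including the i830/845G TAIL erratum) and the GEM backing object.
	 */
	ringbuf = intel_engine_create_ringbuffer(engine, 32 * PAGE_SIZE);
	if (IS_ERR(ringbuf))
		return PTR_ERR(ringbuf);

	engine->buffer = ringbuf;
	return 0;
}

static void example_engine_fini_ring(struct intel_engine_cs *engine)
{
	/* Matching destructor releases the GEM object and frees the struct. */
	intel_ringbuffer_free(engine->buffer);
	engine->buffer = NULL;
}

Note that the constructor reports failure through the ERR_PTR convention rather than returning NULL, so callers test with IS_ERR() and decode with PTR_ERR(), as the converted intel_init_ring_buffer() does below.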
Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c  |  89
1 file changed, 54 insertions(+), 35 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 6e6b8db996ef..20a75bb516ac 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1996,14 +1996,14 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
 	return 0;
 }
 
-void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
+static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
 {
 	drm_gem_object_unreference(&ringbuf->obj->base);
 	ringbuf->obj = NULL;
 }
 
-int intel_alloc_ringbuffer_obj(struct drm_device *dev,
-			       struct intel_ringbuffer *ringbuf)
+static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
+				      struct intel_ringbuffer *ringbuf)
 {
 	struct drm_i915_gem_object *obj;
 
@@ -2023,6 +2023,48 @@ int intel_alloc_ringbuffer_obj(struct drm_device *dev,
 	return 0;
 }
 
+struct intel_ringbuffer *
+intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
+{
+	struct intel_ringbuffer *ring;
+	int ret;
+
+	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+	if (ring == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	ring->ring = engine;
+
+	ring->size = size;
+	/* Workaround an erratum on the i830 which causes a hang if
+	 * the TAIL pointer points to within the last 2 cachelines
+	 * of the buffer.
+	 */
+	ring->effective_size = size;
+	if (IS_I830(engine->dev) || IS_845G(engine->dev))
+		ring->effective_size -= 2 * CACHELINE_BYTES;
+
+	ring->last_retired_head = -1;
+	intel_ring_update_space(ring);
+
+	ret = intel_alloc_ringbuffer_obj(engine->dev, ring);
+	if (ret) {
+		DRM_ERROR("Failed to allocate ringbuffer %s: %d\n",
+			  engine->name, ret);
+		kfree(ring);
+		return ERR_PTR(ret);
+	}
+
+	return ring;
+}
+
+void
+intel_ringbuffer_free(struct intel_ringbuffer *ring)
+{
+	intel_destroy_ringbuffer_obj(ring);
+	kfree(ring);
+}
+
 static int intel_init_ring_buffer(struct drm_device *dev,
 				  struct intel_engine_cs *ring)
 {
@@ -2031,22 +2073,20 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 
 	WARN_ON(ring->buffer);
 
-	ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
-	if (!ringbuf)
-		return -ENOMEM;
-	ring->buffer = ringbuf;
-
 	ring->dev = dev;
 	INIT_LIST_HEAD(&ring->active_list);
 	INIT_LIST_HEAD(&ring->request_list);
 	INIT_LIST_HEAD(&ring->execlist_queue);
 	i915_gem_batch_pool_init(dev, &ring->batch_pool);
-	ringbuf->size = 32 * PAGE_SIZE;
-	ringbuf->ring = ring;
 	memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno));
 
 	init_waitqueue_head(&ring->irq_queue);
 
+	ringbuf = intel_engine_create_ringbuffer(ring, 32 * PAGE_SIZE);
+	if (IS_ERR(ringbuf))
+		return PTR_ERR(ringbuf);
+	ring->buffer = ringbuf;
+
 	if (I915_NEED_GFX_HWS(dev)) {
 		ret = init_status_page(ring);
 		if (ret)
@@ -2058,15 +2098,6 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 			goto error;
 	}
 
-	WARN_ON(ringbuf->obj);
-
-	ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
-	if (ret) {
-		DRM_ERROR("Failed to allocate ringbuffer %s: %d\n",
-			  ring->name, ret);
-		goto error;
-	}
-
 	ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
 	if (ret) {
 		DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
@@ -2075,14 +2106,6 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 		goto error;
 	}
 
-	/* Workaround an erratum on the i830 which causes a hang if
-	 * the TAIL pointer points to within the last 2 cachelines
-	 * of the buffer.
-	 */
-	ringbuf->effective_size = ringbuf->size;
-	if (IS_I830(dev) || IS_845G(dev))
-		ringbuf->effective_size -= 2 * CACHELINE_BYTES;
-
 	ret = i915_cmd_parser_init_ring(ring);
 	if (ret)
 		goto error;
@@ -2090,7 +2113,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 	return 0;
 
 error:
-	kfree(ringbuf);
+	intel_ringbuffer_free(ringbuf);
 	ring->buffer = NULL;
 	return ret;
 }
@@ -2098,19 +2121,18 @@ error:
 void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
 {
 	struct drm_i915_private *dev_priv;
-	struct intel_ringbuffer *ringbuf;
 
 	if (!intel_ring_initialized(ring))
 		return;
 
 	dev_priv = to_i915(ring->dev);
-	ringbuf = ring->buffer;
 
 	intel_stop_ring_buffer(ring);
 	WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0);
 
-	intel_unpin_ringbuffer_obj(ringbuf);
-	intel_destroy_ringbuffer_obj(ringbuf);
+	intel_unpin_ringbuffer_obj(ring->buffer);
+	intel_ringbuffer_free(ring->buffer);
+	ring->buffer = NULL;
 
 	if (ring->cleanup)
 		ring->cleanup(ring);
@@ -2119,9 +2141,6 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
 
 	i915_cmd_parser_fini_ring(ring);
 	i915_gem_batch_pool_fini(&ring->batch_pool);
-
-	kfree(ringbuf);
-	ring->buffer = NULL;
 }
 
 static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
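
One ordering detail from the cleanup hunk is worth calling out: the ringbuffer mapping is unpinned before intel_ringbuffer_free() drops the GEM object, which is why intel_cleanup_ring_buffer() keeps intel_unpin_ringbuffer_obj() ahead of the new free call. A condensed sketch of that teardown order follows; the wrapper function name and the 'engine' variable are placeholders, only the called helpers appear in the diff above.

/*
 * Condensed from intel_cleanup_ring_buffer() in the diff above; the
 * wrapper name and 'engine' are placeholders. The mapping is unpinned
 * before the buffer (and its GEM object) is freed.
 */
static void example_engine_teardown_ring(struct intel_engine_cs *engine)
{
	intel_stop_ring_buffer(engine);
	intel_unpin_ringbuffer_obj(engine->buffer);	/* release the pin/mapping */
	intel_ringbuffer_free(engine->buffer);		/* drop GEM object + kfree */
	engine->buffer = NULL;
}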