diff options
author    Chris Wilson <chris@chris-wilson.co.uk>    2015-09-03 08:01:39 -0400
committer Daniel Vetter <daniel.vetter@ffwll.ch>     2015-09-04 04:17:00 -0400
commit    01101fa7cc85fffc95798d1f67970dad96063fc6 (patch)
tree      0e6b741c4ba5bd4b78dba8c4746d20627d9ff417
parent    742f491d2c204204086d2bc85cc5100daa6ff336 (diff)
drm/i915: Refactor common ringbuffer allocation code
A small, very small, step to sharing the duplicate code between
execlists and legacy submission engines, starting with the ringbuffer
allocation code.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Arun Siluvery <arun.siluvery@linux.intel.com>
Cc: Mika Kuoppala <mika.kuoppala@intel.com>
Cc: Dave Gordon <david.s.gordon@intel.com>
Reviewed-by: Paulo Zanoni <paulo.r.zanoni@intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
-rw-r--r-- | drivers/gpu/drm/i915/intel_lrc.c | 49 | ||||
-rw-r--r-- | drivers/gpu/drm/i915/intel_ringbuffer.c | 89 | ||||
-rw-r--r-- | drivers/gpu/drm/i915/intel_ringbuffer.h | 8 |
3 files changed, 70 insertions(+), 76 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 40cbba4ea4ba..28a712e7d2d0 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c | |||
@@ -2340,8 +2340,7 @@ void intel_lr_context_free(struct intel_context *ctx) | |||
2340 | i915_gem_object_ggtt_unpin(ctx_obj); | 2340 | i915_gem_object_ggtt_unpin(ctx_obj); |
2341 | } | 2341 | } |
2342 | WARN_ON(ctx->engine[ring->id].pin_count); | 2342 | WARN_ON(ctx->engine[ring->id].pin_count); |
2343 | intel_destroy_ringbuffer_obj(ringbuf); | 2343 | intel_ringbuffer_free(ringbuf); |
2344 | kfree(ringbuf); | ||
2345 | drm_gem_object_unreference(&ctx_obj->base); | 2344 | drm_gem_object_unreference(&ctx_obj->base); |
2346 | } | 2345 | } |
2347 | } | 2346 | } |
@@ -2442,42 +2441,20 @@ int intel_lr_context_deferred_create(struct intel_context *ctx, | |||
2442 | I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE); | 2441 | I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE); |
2443 | } | 2442 | } |
2444 | 2443 | ||
2445 | ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL); | 2444 | ringbuf = intel_engine_create_ringbuffer(ring, 4 * PAGE_SIZE); |
2446 | if (!ringbuf) { | 2445 | if (IS_ERR(ringbuf)) { |
2447 | DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n", | 2446 | ret = PTR_ERR(ringbuf); |
2448 | ring->name); | ||
2449 | ret = -ENOMEM; | ||
2450 | goto error_unpin_ctx; | 2447 | goto error_unpin_ctx; |
2451 | } | 2448 | } |
2452 | 2449 | ||
2453 | ringbuf->ring = ring; | 2450 | if (is_global_default_ctx) { |
2454 | 2451 | ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf); | |
2455 | ringbuf->size = 4 * PAGE_SIZE; | ||
2456 | ringbuf->effective_size = ringbuf->size; | ||
2457 | ringbuf->head = 0; | ||
2458 | ringbuf->tail = 0; | ||
2459 | ringbuf->last_retired_head = -1; | ||
2460 | intel_ring_update_space(ringbuf); | ||
2461 | |||
2462 | if (ringbuf->obj == NULL) { | ||
2463 | ret = intel_alloc_ringbuffer_obj(dev, ringbuf); | ||
2464 | if (ret) { | 2452 | if (ret) { |
2465 | DRM_DEBUG_DRIVER( | 2453 | DRM_ERROR( |
2466 | "Failed to allocate ringbuffer obj %s: %d\n", | 2454 | "Failed to pin and map ringbuffer %s: %d\n", |
2467 | ring->name, ret); | 2455 | ring->name, ret); |
2468 | goto error_free_rbuf; | 2456 | goto error_ringbuf; |
2469 | } | 2457 | } |
2470 | |||
2471 | if (is_global_default_ctx) { | ||
2472 | ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf); | ||
2473 | if (ret) { | ||
2474 | DRM_ERROR( | ||
2475 | "Failed to pin and map ringbuffer %s: %d\n", | ||
2476 | ring->name, ret); | ||
2477 | goto error_destroy_rbuf; | ||
2478 | } | ||
2479 | } | ||
2480 | |||
2481 | } | 2458 | } |
2482 | 2459 | ||
2483 | ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf); | 2460 | ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf); |
@@ -2519,10 +2496,8 @@ int intel_lr_context_deferred_create(struct intel_context *ctx, | |||
2519 | error: | 2496 | error: |
2520 | if (is_global_default_ctx) | 2497 | if (is_global_default_ctx) |
2521 | intel_unpin_ringbuffer_obj(ringbuf); | 2498 | intel_unpin_ringbuffer_obj(ringbuf); |
2522 | error_destroy_rbuf: | 2499 | error_ringbuf: |
2523 | intel_destroy_ringbuffer_obj(ringbuf); | 2500 | intel_ringbuffer_free(ringbuf); |
2524 | error_free_rbuf: | ||
2525 | kfree(ringbuf); | ||
2526 | error_unpin_ctx: | 2501 | error_unpin_ctx: |
2527 | if (is_global_default_ctx) | 2502 | if (is_global_default_ctx) |
2528 | i915_gem_object_ggtt_unpin(ctx_obj); | 2503 | i915_gem_object_ggtt_unpin(ctx_obj); |
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 6e6b8db996ef..20a75bb516ac 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
@@ -1996,14 +1996,14 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev, | |||
1996 | return 0; | 1996 | return 0; |
1997 | } | 1997 | } |
1998 | 1998 | ||
1999 | void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf) | 1999 | static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf) |
2000 | { | 2000 | { |
2001 | drm_gem_object_unreference(&ringbuf->obj->base); | 2001 | drm_gem_object_unreference(&ringbuf->obj->base); |
2002 | ringbuf->obj = NULL; | 2002 | ringbuf->obj = NULL; |
2003 | } | 2003 | } |
2004 | 2004 | ||
2005 | int intel_alloc_ringbuffer_obj(struct drm_device *dev, | 2005 | static int intel_alloc_ringbuffer_obj(struct drm_device *dev, |
2006 | struct intel_ringbuffer *ringbuf) | 2006 | struct intel_ringbuffer *ringbuf) |
2007 | { | 2007 | { |
2008 | struct drm_i915_gem_object *obj; | 2008 | struct drm_i915_gem_object *obj; |
2009 | 2009 | ||
@@ -2023,6 +2023,48 @@ int intel_alloc_ringbuffer_obj(struct drm_device *dev, | |||
2023 | return 0; | 2023 | return 0; |
2024 | } | 2024 | } |
2025 | 2025 | ||
2026 | struct intel_ringbuffer * | ||
2027 | intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size) | ||
2028 | { | ||
2029 | struct intel_ringbuffer *ring; | ||
2030 | int ret; | ||
2031 | |||
2032 | ring = kzalloc(sizeof(*ring), GFP_KERNEL); | ||
2033 | if (ring == NULL) | ||
2034 | return ERR_PTR(-ENOMEM); | ||
2035 | |||
2036 | ring->ring = engine; | ||
2037 | |||
2038 | ring->size = size; | ||
2039 | /* Workaround an erratum on the i830 which causes a hang if | ||
2040 | * the TAIL pointer points to within the last 2 cachelines | ||
2041 | * of the buffer. | ||
2042 | */ | ||
2043 | ring->effective_size = size; | ||
2044 | if (IS_I830(engine->dev) || IS_845G(engine->dev)) | ||
2045 | ring->effective_size -= 2 * CACHELINE_BYTES; | ||
2046 | |||
2047 | ring->last_retired_head = -1; | ||
2048 | intel_ring_update_space(ring); | ||
2049 | |||
2050 | ret = intel_alloc_ringbuffer_obj(engine->dev, ring); | ||
2051 | if (ret) { | ||
2052 | DRM_ERROR("Failed to allocate ringbuffer %s: %d\n", | ||
2053 | engine->name, ret); | ||
2054 | kfree(ring); | ||
2055 | return ERR_PTR(ret); | ||
2056 | } | ||
2057 | |||
2058 | return ring; | ||
2059 | } | ||
2060 | |||
2061 | void | ||
2062 | intel_ringbuffer_free(struct intel_ringbuffer *ring) | ||
2063 | { | ||
2064 | intel_destroy_ringbuffer_obj(ring); | ||
2065 | kfree(ring); | ||
2066 | } | ||
2067 | |||
2026 | static int intel_init_ring_buffer(struct drm_device *dev, | 2068 | static int intel_init_ring_buffer(struct drm_device *dev, |
2027 | struct intel_engine_cs *ring) | 2069 | struct intel_engine_cs *ring) |
2028 | { | 2070 | { |
@@ -2031,22 +2073,20 @@ static int intel_init_ring_buffer(struct drm_device *dev, | |||
2031 | 2073 | ||
2032 | WARN_ON(ring->buffer); | 2074 | WARN_ON(ring->buffer); |
2033 | 2075 | ||
2034 | ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL); | ||
2035 | if (!ringbuf) | ||
2036 | return -ENOMEM; | ||
2037 | ring->buffer = ringbuf; | ||
2038 | |||
2039 | ring->dev = dev; | 2076 | ring->dev = dev; |
2040 | INIT_LIST_HEAD(&ring->active_list); | 2077 | INIT_LIST_HEAD(&ring->active_list); |
2041 | INIT_LIST_HEAD(&ring->request_list); | 2078 | INIT_LIST_HEAD(&ring->request_list); |
2042 | INIT_LIST_HEAD(&ring->execlist_queue); | 2079 | INIT_LIST_HEAD(&ring->execlist_queue); |
2043 | i915_gem_batch_pool_init(dev, &ring->batch_pool); | 2080 | i915_gem_batch_pool_init(dev, &ring->batch_pool); |
2044 | ringbuf->size = 32 * PAGE_SIZE; | ||
2045 | ringbuf->ring = ring; | ||
2046 | memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno)); | 2081 | memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno)); |
2047 | 2082 | ||
2048 | init_waitqueue_head(&ring->irq_queue); | 2083 | init_waitqueue_head(&ring->irq_queue); |
2049 | 2084 | ||
2085 | ringbuf = intel_engine_create_ringbuffer(ring, 32 * PAGE_SIZE); | ||
2086 | if (IS_ERR(ringbuf)) | ||
2087 | return PTR_ERR(ringbuf); | ||
2088 | ring->buffer = ringbuf; | ||
2089 | |||
2050 | if (I915_NEED_GFX_HWS(dev)) { | 2090 | if (I915_NEED_GFX_HWS(dev)) { |
2051 | ret = init_status_page(ring); | 2091 | ret = init_status_page(ring); |
2052 | if (ret) | 2092 | if (ret) |
@@ -2058,15 +2098,6 @@ static int intel_init_ring_buffer(struct drm_device *dev, | |||
2058 | goto error; | 2098 | goto error; |
2059 | } | 2099 | } |
2060 | 2100 | ||
2061 | WARN_ON(ringbuf->obj); | ||
2062 | |||
2063 | ret = intel_alloc_ringbuffer_obj(dev, ringbuf); | ||
2064 | if (ret) { | ||
2065 | DRM_ERROR("Failed to allocate ringbuffer %s: %d\n", | ||
2066 | ring->name, ret); | ||
2067 | goto error; | ||
2068 | } | ||
2069 | |||
2070 | ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf); | 2101 | ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf); |
2071 | if (ret) { | 2102 | if (ret) { |
2072 | DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n", | 2103 | DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n", |
@@ -2075,14 +2106,6 @@ static int intel_init_ring_buffer(struct drm_device *dev, | |||
2075 | goto error; | 2106 | goto error; |
2076 | } | 2107 | } |
2077 | 2108 | ||
2078 | /* Workaround an erratum on the i830 which causes a hang if | ||
2079 | * the TAIL pointer points to within the last 2 cachelines | ||
2080 | * of the buffer. | ||
2081 | */ | ||
2082 | ringbuf->effective_size = ringbuf->size; | ||
2083 | if (IS_I830(dev) || IS_845G(dev)) | ||
2084 | ringbuf->effective_size -= 2 * CACHELINE_BYTES; | ||
2085 | |||
2086 | ret = i915_cmd_parser_init_ring(ring); | 2109 | ret = i915_cmd_parser_init_ring(ring); |
2087 | if (ret) | 2110 | if (ret) |
2088 | goto error; | 2111 | goto error; |
@@ -2090,7 +2113,7 @@ static int intel_init_ring_buffer(struct drm_device *dev, | |||
2090 | return 0; | 2113 | return 0; |
2091 | 2114 | ||
2092 | error: | 2115 | error: |
2093 | kfree(ringbuf); | 2116 | intel_ringbuffer_free(ringbuf); |
2094 | ring->buffer = NULL; | 2117 | ring->buffer = NULL; |
2095 | return ret; | 2118 | return ret; |
2096 | } | 2119 | } |
@@ -2098,19 +2121,18 @@ error: | |||
2098 | void intel_cleanup_ring_buffer(struct intel_engine_cs *ring) | 2121 | void intel_cleanup_ring_buffer(struct intel_engine_cs *ring) |
2099 | { | 2122 | { |
2100 | struct drm_i915_private *dev_priv; | 2123 | struct drm_i915_private *dev_priv; |
2101 | struct intel_ringbuffer *ringbuf; | ||
2102 | 2124 | ||
2103 | if (!intel_ring_initialized(ring)) | 2125 | if (!intel_ring_initialized(ring)) |
2104 | return; | 2126 | return; |
2105 | 2127 | ||
2106 | dev_priv = to_i915(ring->dev); | 2128 | dev_priv = to_i915(ring->dev); |
2107 | ringbuf = ring->buffer; | ||
2108 | 2129 | ||
2109 | intel_stop_ring_buffer(ring); | 2130 | intel_stop_ring_buffer(ring); |
2110 | WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0); | 2131 | WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0); |
2111 | 2132 | ||
2112 | intel_unpin_ringbuffer_obj(ringbuf); | 2133 | intel_unpin_ringbuffer_obj(ring->buffer); |
2113 | intel_destroy_ringbuffer_obj(ringbuf); | 2134 | intel_ringbuffer_free(ring->buffer); |
2135 | ring->buffer = NULL; | ||
2114 | 2136 | ||
2115 | if (ring->cleanup) | 2137 | if (ring->cleanup) |
2116 | ring->cleanup(ring); | 2138 | ring->cleanup(ring); |
@@ -2119,9 +2141,6 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring) | |||
2119 | 2141 | ||
2120 | i915_cmd_parser_fini_ring(ring); | 2142 | i915_cmd_parser_fini_ring(ring); |
2121 | i915_gem_batch_pool_fini(&ring->batch_pool); | 2143 | i915_gem_batch_pool_fini(&ring->batch_pool); |
2122 | |||
2123 | kfree(ringbuf); | ||
2124 | ring->buffer = NULL; | ||
2125 | } | 2144 | } |
2126 | 2145 | ||
2127 | static int ring_wait_for_space(struct intel_engine_cs *ring, int n) | 2146 | static int ring_wait_for_space(struct intel_engine_cs *ring, int n) |
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 95b0b4b55fa6..49fa41dc0eb6 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h | |||
@@ -420,12 +420,12 @@ intel_write_status_page(struct intel_engine_cs *ring, | |||
420 | #define I915_GEM_HWS_SCRATCH_INDEX 0x40 | 420 | #define I915_GEM_HWS_SCRATCH_INDEX 0x40 |
421 | #define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT) | 421 | #define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT) |
422 | 422 | ||
423 | void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf); | 423 | struct intel_ringbuffer * |
424 | intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size); | ||
424 | int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev, | 425 | int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev, |
425 | struct intel_ringbuffer *ringbuf); | 426 | struct intel_ringbuffer *ringbuf); |
426 | void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf); | 427 | void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf); |
427 | int intel_alloc_ringbuffer_obj(struct drm_device *dev, | 428 | void intel_ringbuffer_free(struct intel_ringbuffer *ring); |
428 | struct intel_ringbuffer *ringbuf); | ||
429 | 429 | ||
430 | void intel_stop_ring_buffer(struct intel_engine_cs *ring); | 430 | void intel_stop_ring_buffer(struct intel_engine_cs *ring); |
431 | void intel_cleanup_ring_buffer(struct intel_engine_cs *ring); | 431 | void intel_cleanup_ring_buffer(struct intel_engine_cs *ring); |