author     Oscar Mateo <oscar.mateo@intel.com>      2014-07-24 12:04:15 -0400
committer  Daniel Vetter <daniel.vetter@ffwll.ch>   2014-08-11 10:10:58 -0400
commit     84c2377fcee7a43cd964b62143e9a3714130bb0c (patch)
tree       15f76ff6f576544e00180e1231558ecd847a28a8 /drivers
parent     8c8579176a144b1dca1d99ebb92510924168d508 (diff)
drm/i915/bdw: Allocate ringbuffers for Logical Ring Contexts
As we have said a couple of times by now, logical ring contexts have their
own ringbuffers: not only the backing pages, but the whole management struct.

In a previous version of the series, this was achieved with two separate
patches:

drm/i915/bdw: Allocate ringbuffer backing objects for default global LRC
drm/i915/bdw: Allocate ringbuffer for user-created LRCs

Signed-off-by: Oscar Mateo <oscar.mateo@intel.com>
Reviewed-by: Damien Lespiau <damien.lespiau@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
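For orientation, a minimal sketch (illustration only, not part of the patch) of the per-engine execlists state this commit ends up with; the real definition is in the i915_drv.h hunk below, and other members of intel_context are elided here:

struct intel_context {
        /* ... other members elided ... */

        /* Execlists */
        struct {
                struct drm_i915_gem_object *state; /* backing object for the logical ring context */
                struct intel_ringbuffer *ringbuf;  /* per-context ringbuffer added by this patch */
        } engine[I915_NUM_RINGS];

        struct list_head link;
};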
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h          |  1
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c         | 38
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c  |  6
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h  |  4
4 files changed, 46 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index cbae19bab4bf..eccb8e406e9c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -635,6 +635,7 @@ struct intel_context {
 	/* Execlists */
 	struct {
 		struct drm_i915_gem_object *state;
+		struct intel_ringbuffer *ringbuf;
 	} engine[I915_NUM_RINGS];
 
 	struct list_head link;
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 9f30ee80e487..0c80bb1f5420 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -65,7 +65,11 @@ void intel_lr_context_free(struct intel_context *ctx)
 
 	for (i = 0; i < I915_NUM_RINGS; i++) {
 		struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
+		struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;
+
 		if (ctx_obj) {
+			intel_destroy_ringbuffer_obj(ringbuf);
+			kfree(ringbuf);
 			i915_gem_object_ggtt_unpin(ctx_obj);
 			drm_gem_object_unreference(&ctx_obj->base);
 		}
@@ -99,6 +103,7 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_gem_object *ctx_obj;
 	uint32_t context_size;
+	struct intel_ringbuffer *ringbuf;
 	int ret;
 
 	WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL);
@@ -119,6 +124,39 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
 		return ret;
 	}
 
+	ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
+	if (!ringbuf) {
+		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n",
+				ring->name);
+		i915_gem_object_ggtt_unpin(ctx_obj);
+		drm_gem_object_unreference(&ctx_obj->base);
+		ret = -ENOMEM;
+		return ret;
+	}
+
+	ringbuf->size = 32 * PAGE_SIZE;
+	ringbuf->effective_size = ringbuf->size;
+	ringbuf->head = 0;
+	ringbuf->tail = 0;
+	ringbuf->space = ringbuf->size;
+	ringbuf->last_retired_head = -1;
+
+	/* TODO: For now we put this in the mappable region so that we can reuse
+	 * the existing ringbuffer code which ioremaps it. When we start
+	 * creating many contexts, this will no longer work and we must switch
+	 * to a kmapish interface.
+	 */
+	ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
+	if (ret) {
+		DRM_DEBUG_DRIVER("Failed to allocate ringbuffer obj %s: %d\n",
+				ring->name, ret);
+		kfree(ringbuf);
+		i915_gem_object_ggtt_unpin(ctx_obj);
+		drm_gem_object_unreference(&ctx_obj->base);
+		return ret;
+	}
+
+	ctx->engine[ring->id].ringbuf = ringbuf;
 	ctx->engine[ring->id].state = ctx_obj;
 
 	return 0;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index a059b64a0fb2..064652034d7e 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1519,7 +1519,7 @@ static int init_phys_status_page(struct intel_engine_cs *ring)
 	return 0;
 }
 
-static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
+void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
 {
 	if (!ringbuf->obj)
 		return;
@@ -1530,8 +1530,8 @@ static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
 	ringbuf->obj = NULL;
 }
 
-static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
-				      struct intel_ringbuffer *ringbuf)
+int intel_alloc_ringbuffer_obj(struct drm_device *dev,
+			       struct intel_ringbuffer *ringbuf)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_i915_gem_object *obj;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 70525d0c2c74..669cc7527f9a 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -355,6 +355,10 @@ intel_write_status_page(struct intel_engine_cs *ring,
 #define I915_GEM_HWS_SCRATCH_INDEX	0x30
 #define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
 
+void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
+int intel_alloc_ringbuffer_obj(struct drm_device *dev,
+			       struct intel_ringbuffer *ringbuf);
+
 void intel_stop_ring_buffer(struct intel_engine_cs *ring);
 void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);
 
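As a usage note, a hedged sketch of how the two helpers made non-static by this patch pair up for a per-context ringbuffer. It condenses the intel_lrc.c hunks above; the function names here are hypothetical and error unwinding of the context object is elided, the real callers being intel_lr_context_deferred_create() and intel_lr_context_free().

/* Sketch only: allocation side, mirroring intel_lr_context_deferred_create(). */
static int lrc_ringbuf_alloc_example(struct drm_device *dev,
                                     struct intel_ringbuffer *ringbuf)
{
        ringbuf->size = 32 * PAGE_SIZE;         /* fixed 32-page size, as in the patch */
        ringbuf->effective_size = ringbuf->size;
        ringbuf->space = ringbuf->size;
        ringbuf->last_retired_head = -1;

        /* Allocate the GEM backing object (mappable for now, see the TODO above). */
        return intel_alloc_ringbuffer_obj(dev, ringbuf);
}

/* Sketch only: teardown side, mirroring intel_lr_context_free(). */
static void lrc_ringbuf_free_example(struct intel_ringbuffer *ringbuf)
{
        /* Release the backing object first, then the management struct itself. */
        intel_destroy_ringbuffer_obj(ringbuf);
        kfree(ringbuf);
}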