about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorDave Gordon <david.s.gordon@intel.com>2016-01-19 14:02:53 -0500
committerDaniel Vetter <daniel.vetter@ffwll.ch>2016-01-21 03:21:29 -0500
commit26827088392196d0e8464dae599bd5ff9992cb82 (patch)
tree4b8ff19116660e2b2b30ec515db5c029b88412e4
parente794129444aba459e9bedf5080bfb4605f933c32 (diff)
drm/i915: simplify allocation of driver-internal requests
There are a number of places where the driver needs a request, but isn't working on behalf of any specific user or in a specific context. At present, we associate them with the per-engine default context. A future patch will abolish those per-engine context pointers; but we can already eliminate a lot of the references to them, just by making the allocator allow NULL as a shorthand for "an appropriate context for this ring", which will mean that the callers don't need to know anything about how the "appropriate context" is found (e.g. per-ring vs per-device, etc). So this patch renames the existing i915_gem_request_alloc(), and makes it local (static inline), and replaces it with a wrapper that provides a default if the context is NULL, and also has a nicer calling convention (doesn't require a pointer to an output parameter). Then we change all callers to use the new convention: OLD: err = i915_gem_request_alloc(ring, user_ctx, &req); if (err) ... NEW: req = i915_gem_request_alloc(ring, user_ctx); if (IS_ERR(req)) ... OLD: err = i915_gem_request_alloc(ring, ring->default_context, &req); if (err) ... NEW: req = i915_gem_request_alloc(ring, NULL); if (IS_ERR(req)) ... v4: Rebased Signed-off-by: Dave Gordon <david.s.gordon@intel.com> Reviewed-by: Nick Hoath <nicholas.hoath@intel.com> Link: http://patchwork.freedesktop.org/patch/msgid/1453230175-19330-2-git-send-email-david.s.gordon@intel.com Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h6
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c55
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c14
-rw-r--r--drivers/gpu/drm/i915/intel_display.c6
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c9
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c24
6 files changed, 74 insertions(+), 40 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index d3b98c228683..125659488756 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2268,9 +2268,9 @@ struct drm_i915_gem_request {
2268 2268
2269}; 2269};
2270 2270
2271int i915_gem_request_alloc(struct intel_engine_cs *ring, 2271struct drm_i915_gem_request * __must_check
2272 struct intel_context *ctx, 2272i915_gem_request_alloc(struct intel_engine_cs *engine,
2273 struct drm_i915_gem_request **req_out); 2273 struct intel_context *ctx);
2274void i915_gem_request_cancel(struct drm_i915_gem_request *req); 2274void i915_gem_request_cancel(struct drm_i915_gem_request *req);
2275void i915_gem_request_free(struct kref *req_ref); 2275void i915_gem_request_free(struct kref *req_ref);
2276int i915_gem_request_add_to_client(struct drm_i915_gem_request *req, 2276int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 6b0102da859c..8e716b6b5d59 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2690,9 +2690,10 @@ void i915_gem_request_free(struct kref *req_ref)
2690 kmem_cache_free(req->i915->requests, req); 2690 kmem_cache_free(req->i915->requests, req);
2691} 2691}
2692 2692
2693int i915_gem_request_alloc(struct intel_engine_cs *ring, 2693static inline int
2694 struct intel_context *ctx, 2694__i915_gem_request_alloc(struct intel_engine_cs *ring,
2695 struct drm_i915_gem_request **req_out) 2695 struct intel_context *ctx,
2696 struct drm_i915_gem_request **req_out)
2696{ 2697{
2697 struct drm_i915_private *dev_priv = to_i915(ring->dev); 2698 struct drm_i915_private *dev_priv = to_i915(ring->dev);
2698 struct drm_i915_gem_request *req; 2699 struct drm_i915_gem_request *req;
@@ -2755,6 +2756,31 @@ err:
2755 return ret; 2756 return ret;
2756} 2757}
2757 2758
2759/**
2760 * i915_gem_request_alloc - allocate a request structure
2761 *
2762 * @engine: engine that we wish to issue the request on.
2763 * @ctx: context that the request will be associated with.
2764 * This can be NULL if the request is not directly related to
2765 * any specific user context, in which case this function will
2766 * choose an appropriate context to use.
2767 *
2768 * Returns a pointer to the allocated request if successful,
2769 * or an error code if not.
2770 */
2771struct drm_i915_gem_request *
2772i915_gem_request_alloc(struct intel_engine_cs *engine,
2773 struct intel_context *ctx)
2774{
2775 struct drm_i915_gem_request *req;
2776 int err;
2777
2778 if (ctx == NULL)
2779 ctx = engine->default_context;
2780 err = __i915_gem_request_alloc(engine, ctx, &req);
2781 return err ? ERR_PTR(err) : req;
2782}
2783
2758void i915_gem_request_cancel(struct drm_i915_gem_request *req) 2784void i915_gem_request_cancel(struct drm_i915_gem_request *req)
2759{ 2785{
2760 intel_ring_reserved_space_cancel(req->ringbuf); 2786 intel_ring_reserved_space_cancel(req->ringbuf);
@@ -3172,9 +3198,13 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
3172 return 0; 3198 return 0;
3173 3199
3174 if (*to_req == NULL) { 3200 if (*to_req == NULL) {
3175 ret = i915_gem_request_alloc(to, to->default_context, to_req); 3201 struct drm_i915_gem_request *req;
3176 if (ret) 3202
3177 return ret; 3203 req = i915_gem_request_alloc(to, NULL);
3204 if (IS_ERR(req))
3205 return PTR_ERR(req);
3206
3207 *to_req = req;
3178 } 3208 }
3179 3209
3180 trace_i915_gem_ring_sync_to(*to_req, from, from_req); 3210 trace_i915_gem_ring_sync_to(*to_req, from, from_req);
@@ -3374,9 +3404,9 @@ int i915_gpu_idle(struct drm_device *dev)
3374 if (!i915.enable_execlists) { 3404 if (!i915.enable_execlists) {
3375 struct drm_i915_gem_request *req; 3405 struct drm_i915_gem_request *req;
3376 3406
3377 ret = i915_gem_request_alloc(ring, ring->default_context, &req); 3407 req = i915_gem_request_alloc(ring, NULL);
3378 if (ret) 3408 if (IS_ERR(req))
3379 return ret; 3409 return PTR_ERR(req);
3380 3410
3381 ret = i915_switch_context(req); 3411 ret = i915_switch_context(req);
3382 if (ret) { 3412 if (ret) {
@@ -4871,10 +4901,9 @@ i915_gem_init_hw(struct drm_device *dev)
4871 for_each_ring(ring, dev_priv, i) { 4901 for_each_ring(ring, dev_priv, i) {
4872 struct drm_i915_gem_request *req; 4902 struct drm_i915_gem_request *req;
4873 4903
4874 WARN_ON(!ring->default_context); 4904 req = i915_gem_request_alloc(ring, NULL);
4875 4905 if (IS_ERR(req)) {
4876 ret = i915_gem_request_alloc(ring, ring->default_context, &req); 4906 ret = PTR_ERR(req);
4877 if (ret) {
4878 i915_gem_cleanup_ringbuffer(dev); 4907 i915_gem_cleanup_ringbuffer(dev);
4879 goto out; 4908 goto out;
4880 } 4909 }
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 4edf1c062210..dc32018ee2e9 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1381,6 +1381,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1381 struct drm_i915_gem_exec_object2 *exec) 1381 struct drm_i915_gem_exec_object2 *exec)
1382{ 1382{
1383 struct drm_i915_private *dev_priv = dev->dev_private; 1383 struct drm_i915_private *dev_priv = dev->dev_private;
1384 struct drm_i915_gem_request *req = NULL;
1384 struct eb_vmas *eb; 1385 struct eb_vmas *eb;
1385 struct drm_i915_gem_object *batch_obj; 1386 struct drm_i915_gem_object *batch_obj;
1386 struct drm_i915_gem_exec_object2 shadow_exec_entry; 1387 struct drm_i915_gem_exec_object2 shadow_exec_entry;
@@ -1602,11 +1603,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1602 params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm); 1603 params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm);
1603 1604
1604 /* Allocate a request for this batch buffer nice and early. */ 1605 /* Allocate a request for this batch buffer nice and early. */
1605 ret = i915_gem_request_alloc(ring, ctx, &params->request); 1606 req = i915_gem_request_alloc(ring, ctx);
1606 if (ret) 1607 if (IS_ERR(req)) {
1608 ret = PTR_ERR(req);
1607 goto err_batch_unpin; 1609 goto err_batch_unpin;
1610 }
1608 1611
1609 ret = i915_gem_request_add_to_client(params->request, file); 1612 ret = i915_gem_request_add_to_client(req, file);
1610 if (ret) 1613 if (ret)
1611 goto err_batch_unpin; 1614 goto err_batch_unpin;
1612 1615
@@ -1622,6 +1625,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1622 params->dispatch_flags = dispatch_flags; 1625 params->dispatch_flags = dispatch_flags;
1623 params->batch_obj = batch_obj; 1626 params->batch_obj = batch_obj;
1624 params->ctx = ctx; 1627 params->ctx = ctx;
1628 params->request = req;
1625 1629
1626 ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas); 1630 ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas);
1627 1631
@@ -1645,8 +1649,8 @@ err:
1645 * must be freed again. If it was submitted then it is being tracked 1649 * must be freed again. If it was submitted then it is being tracked
1646 * on the active request list and no clean up is required here. 1650 * on the active request list and no clean up is required here.
1647 */ 1651 */
1648 if (ret && params->request) 1652 if (ret && req)
1649 i915_gem_request_cancel(params->request); 1653 i915_gem_request_cancel(req);
1650 1654
1651 mutex_unlock(&dev->struct_mutex); 1655 mutex_unlock(&dev->struct_mutex);
1652 1656
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 06ab6df8ad48..8104511ad302 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -11690,9 +11690,11 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
11690 obj->last_write_req); 11690 obj->last_write_req);
11691 } else { 11691 } else {
11692 if (!request) { 11692 if (!request) {
11693 ret = i915_gem_request_alloc(ring, ring->default_context, &request); 11693 request = i915_gem_request_alloc(ring, NULL);
11694 if (ret) 11694 if (IS_ERR(request)) {
11695 ret = PTR_ERR(request);
11695 goto cleanup_unpin; 11696 goto cleanup_unpin;
11697 }
11696 } 11698 }
11697 11699
11698 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request, 11700 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index faaf49077fea..ec2482daffa6 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -2520,11 +2520,10 @@ int intel_lr_context_deferred_alloc(struct intel_context *ctx,
2520 if (ctx != ring->default_context && ring->init_context) { 2520 if (ctx != ring->default_context && ring->init_context) {
2521 struct drm_i915_gem_request *req; 2521 struct drm_i915_gem_request *req;
2522 2522
2523 ret = i915_gem_request_alloc(ring, 2523 req = i915_gem_request_alloc(ring, ctx);
2524 ctx, &req); 2524 if (IS_ERR(req)) {
2525 if (ret) { 2525 ret = PTR_ERR(req);
2526 DRM_ERROR("ring create req: %d\n", 2526 DRM_ERROR("ring create req: %d\n", ret);
2527 ret);
2528 goto error_ringbuf; 2527 goto error_ringbuf;
2529 } 2528 }
2530 2529
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 76f1980a7541..9168413fe204 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -240,9 +240,9 @@ static int intel_overlay_on(struct intel_overlay *overlay)
240 WARN_ON(overlay->active); 240 WARN_ON(overlay->active);
241 WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE)); 241 WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
242 242
243 ret = i915_gem_request_alloc(ring, ring->default_context, &req); 243 req = i915_gem_request_alloc(ring, NULL);
244 if (ret) 244 if (IS_ERR(req))
245 return ret; 245 return PTR_ERR(req);
246 246
247 ret = intel_ring_begin(req, 4); 247 ret = intel_ring_begin(req, 4);
248 if (ret) { 248 if (ret) {
@@ -283,9 +283,9 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
283 if (tmp & (1 << 17)) 283 if (tmp & (1 << 17))
284 DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp); 284 DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
285 285
286 ret = i915_gem_request_alloc(ring, ring->default_context, &req); 286 req = i915_gem_request_alloc(ring, NULL);
287 if (ret) 287 if (IS_ERR(req))
288 return ret; 288 return PTR_ERR(req);
289 289
290 ret = intel_ring_begin(req, 2); 290 ret = intel_ring_begin(req, 2);
291 if (ret) { 291 if (ret) {
@@ -349,9 +349,9 @@ static int intel_overlay_off(struct intel_overlay *overlay)
349 * of the hw. Do it in both cases */ 349 * of the hw. Do it in both cases */
350 flip_addr |= OFC_UPDATE; 350 flip_addr |= OFC_UPDATE;
351 351
352 ret = i915_gem_request_alloc(ring, ring->default_context, &req); 352 req = i915_gem_request_alloc(ring, NULL);
353 if (ret) 353 if (IS_ERR(req))
354 return ret; 354 return PTR_ERR(req);
355 355
356 ret = intel_ring_begin(req, 6); 356 ret = intel_ring_begin(req, 6);
357 if (ret) { 357 if (ret) {
@@ -423,9 +423,9 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
423 /* synchronous slowpath */ 423 /* synchronous slowpath */
424 struct drm_i915_gem_request *req; 424 struct drm_i915_gem_request *req;
425 425
426 ret = i915_gem_request_alloc(ring, ring->default_context, &req); 426 req = i915_gem_request_alloc(ring, NULL);
427 if (ret) 427 if (IS_ERR(req))
428 return ret; 428 return PTR_ERR(req);
429 429
430 ret = intel_ring_begin(req, 2); 430 ret = intel_ring_begin(req, 2);
431 if (ret) { 431 if (ret) {