author      Chris Wilson <chris@chris-wilson.co.uk>      2012-08-24 14:18:18 -0400
committer   Daniel Vetter <daniel.vetter@ffwll.ch>       2012-08-24 15:02:51 -0400
commit      7788a765205f63abcb8645c16c85a968bd578f4f (patch)
tree        9e3497c7c370c514a7a5974ea493fc1aeb607b04
parent      050ee91f128bd767b1413383fea6c973aa464c54 (diff)
drm/i915: Avoid unbinding due to an interrupted pin_and_fence during execbuffer
If we need to stall in order to complete the pin_and_fence operation
during execbuffer reservation, there is a high likelihood that the
operation will be interrupted by a signal (thanks X!). In order to
simplify the cleanup along that error path, the object was
unconditionally unbound and the error propagated. However, being
interrupted here is far more common than I would like and so we can
strive to avoid the extra work by eliminating the forced unbind.

v2: In discussion over the indecent colour of the new functions and
unwind path, we realised that we can use the new unreserve function to
clean up the code even further.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c  114
1 files changed, 45 insertions, 69 deletions
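The core of the change below is that i915_gem_execbuffer_reserve_object() records what it acquired in per-object flags (__EXEC_OBJECT_HAS_PIN, __EXEC_OBJECT_HAS_FENCE), and a single err label then calls i915_gem_execbuffer_unreserve_object() on every object, releasing only what the flags say was taken rather than unbinding after an interrupted pin. The following is a minimal user-space sketch of that flag-based acquire/unwind pattern, assuming simplified stand-in types; struct obj, reserve_object() and unreserve_object() are illustrative names, not the i915 API.

```c
/* Simplified model of the flag-based reserve/unreserve pattern.
 * All names here are illustrative stand-ins, not kernel interfaces. */
#include <stdbool.h>
#include <stdio.h>

#define HAS_PIN   (1u << 31)
#define HAS_FENCE (1u << 30)

struct obj {
	unsigned int flags;
	bool bound;       /* stands in for obj->gtt_space */
	bool needs_fence;
	bool pin_fails;   /* force a failure to exercise the unwind path */
};

/* Pin the object and record exactly what was acquired in its flags. */
static int reserve_object(struct obj *o)
{
	if (o->pin_fails)
		return -1;        /* e.g. pin interrupted by a signal */
	o->bound = true;
	o->flags |= HAS_PIN;
	if (o->needs_fence)
		o->flags |= HAS_FENCE;
	return 0;
}

/* Release only what the flags say was acquired; safe on any object. */
static void unreserve_object(struct obj *o)
{
	if (!o->bound)
		return;
	if (o->flags & HAS_FENCE)
		printf("unpin fence\n");
	if (o->flags & HAS_PIN)
		printf("unpin object\n");
	o->flags &= ~(HAS_PIN | HAS_FENCE);
}

int main(void)
{
	struct obj objs[3] = {
		{ .needs_fence = true },
		{ .pin_fails = true },   /* interrupted mid-reservation */
		{ 0 },
	};
	int i, ret = 0;

	for (i = 0; i < 3; i++) {
		ret = reserve_object(&objs[i]);
		if (ret)
			goto err;
	}
err:	/* Decrement pin counts for whatever was bound; nothing is unbound. */
	for (i = 0; i < 3; i++)
		unreserve_object(&objs[i]);
	return ret ? 1 : 0;
}
```

The point of the pattern is that the error path becomes idempotent and cheap: an interrupted reservation no longer forces an unbind, and the cleanup loop can run over the whole list without tracking how far the forward pass got.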
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index dc87563440f9..e6b2205ecf6d 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -331,7 +331,8 @@ i915_gem_execbuffer_relocate(struct drm_device *dev,
 	return ret;
 }
 
-#define __EXEC_OBJECT_HAS_FENCE (1<<31)
+#define __EXEC_OBJECT_HAS_PIN (1<<31)
+#define __EXEC_OBJECT_HAS_FENCE (1<<30)
 
 static int
 need_reloc_mappable(struct drm_i915_gem_object *obj)
@@ -341,9 +342,10 @@ need_reloc_mappable(struct drm_i915_gem_object *obj)
 }
 
 static int
-pin_and_fence_object(struct drm_i915_gem_object *obj,
-		     struct intel_ring_buffer *ring)
+i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
+				   struct intel_ring_buffer *ring)
 {
+	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
 	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
 	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
 	bool need_fence, need_mappable;
@@ -359,11 +361,13 @@ pin_and_fence_object(struct drm_i915_gem_object *obj,
 	if (ret)
 		return ret;
 
+	entry->flags |= __EXEC_OBJECT_HAS_PIN;
+
 	if (has_fenced_gpu_access) {
 		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
 			ret = i915_gem_object_get_fence(obj);
 			if (ret)
-				goto err_unpin;
+				return ret;
 
 			if (i915_gem_object_pin_fence(obj))
 				entry->flags |= __EXEC_OBJECT_HAS_FENCE;
@@ -372,12 +376,35 @@ pin_and_fence_object(struct drm_i915_gem_object *obj,
 		}
 	}
 
+	/* Ensure ppgtt mapping exists if needed */
+	if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
+		i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
+				       obj, obj->cache_level);
+
+		obj->has_aliasing_ppgtt_mapping = 1;
+	}
+
 	entry->offset = obj->gtt_offset;
 	return 0;
+}
 
-err_unpin:
-	i915_gem_object_unpin(obj);
-	return ret;
+static void
+i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
+{
+	struct drm_i915_gem_exec_object2 *entry;
+
+	if (!obj->gtt_space)
+		return;
+
+	entry = obj->exec_entry;
+
+	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
+		i915_gem_object_unpin_fence(obj);
+
+	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
+		i915_gem_object_unpin(obj);
+
+	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
 }
 
 static int
@@ -385,11 +412,10 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 			    struct drm_file *file,
 			    struct list_head *objects)
 {
-	drm_i915_private_t *dev_priv = ring->dev->dev_private;
 	struct drm_i915_gem_object *obj;
-	int ret, retry;
-	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
 	struct list_head ordered_objects;
+	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
+	int retry;
 
 	INIT_LIST_HEAD(&ordered_objects);
 	while (!list_empty(objects)) {
@@ -427,12 +453,12 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 	 * 2. Bind new objects.
 	 * 3. Decrement pin count.
 	 *
-	 * This avoid unnecessary unbinding of later objects in order to makr
+	 * This avoid unnecessary unbinding of later objects in order to make
 	 * room for the earlier objects *unless* we need to defragment.
 	 */
 	retry = 0;
 	do {
-		ret = 0;
+		int ret = 0;
 
 		/* Unbind any ill-fitting objects or pin. */
 		list_for_each_entry(obj, objects, exec_list) {
@@ -452,7 +478,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 		    (need_mappable && !obj->map_and_fenceable))
 			ret = i915_gem_object_unbind(obj);
 		else
-			ret = pin_and_fence_object(obj, ring);
+			ret = i915_gem_execbuffer_reserve_object(obj, ring);
 		if (ret)
 			goto err;
 	}
@@ -462,46 +488,14 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 			if (obj->gtt_space)
 				continue;
 
-			ret = pin_and_fence_object(obj, ring);
-			if (ret) {
-				int ret_ignore;
-
-				/* This can potentially raise a harmless
-				 * -EINVAL if we failed to bind in the above
-				 * call. It cannot raise -EINTR since we know
-				 * that the bo is freshly bound and so will
-				 * not need to be flushed or waited upon.
-				 */
-				ret_ignore = i915_gem_object_unbind(obj);
-				(void)ret_ignore;
-				WARN_ON(obj->gtt_space);
-				break;
-			}
+			ret = i915_gem_execbuffer_reserve_object(obj, ring);
+			if (ret)
+				goto err;
 		}
 
-		/* Decrement pin count for bound objects */
-		list_for_each_entry(obj, objects, exec_list) {
-			struct drm_i915_gem_exec_object2 *entry;
-
-			if (!obj->gtt_space)
-				continue;
-
-			entry = obj->exec_entry;
-			if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
-				i915_gem_object_unpin_fence(obj);
-				entry->flags &= ~__EXEC_OBJECT_HAS_FENCE;
-			}
-
-			i915_gem_object_unpin(obj);
-
-			/* ... and ensure ppgtt mapping exist if needed. */
-			if (dev_priv->mm.aliasing_ppgtt && !obj->has_aliasing_ppgtt_mapping) {
-				i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
-						       obj, obj->cache_level);
-
-				obj->has_aliasing_ppgtt_mapping = 1;
-			}
-		}
+err:		/* Decrement pin count for bound objects */
+		list_for_each_entry(obj, objects, exec_list)
+			i915_gem_execbuffer_unreserve_object(obj);
 
 		if (ret != -ENOSPC || retry++)
 			return ret;
@@ -510,24 +504,6 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 		if (ret)
 			return ret;
 	} while (1);
-
-err:
-	list_for_each_entry_continue_reverse(obj, objects, exec_list) {
-		struct drm_i915_gem_exec_object2 *entry;
-
-		if (!obj->gtt_space)
-			continue;
-
-		entry = obj->exec_entry;
-		if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
-			i915_gem_object_unpin_fence(obj);
-			entry->flags &= ~__EXEC_OBJECT_HAS_FENCE;
-		}
-
-		i915_gem_object_unpin(obj);
-	}
-
-	return ret;
 }
 
 static int