author		Chris Wilson <chris@chris-wilson.co.uk>	2016-08-18 12:16:50 -0400
committer	Chris Wilson <chris@chris-wilson.co.uk>	2016-08-18 17:36:45 -0400
commit		9764951e7f517717bc7ecc3f1a9711816646ebf7 (patch)
tree		8b1c971efc4773effa1fe21ba31ac49dd9334ef9
parent		3b5724d702ef24ee41ca008a1fab1cf94f3d31b5 (diff)
drm/i915: Pin the pages first in shmem prepare read/write
There is an improbable, but not impossible, case that if we leave the pages unpinned as we operate on the object, then somebody via the shrinker may steal the lock (which lock? right now, it is struct_mutex, THE lock) and change the cache domains after we have already inspected them.

(Whilst here, avail ourselves of the opportunity to take a couple of steps to make the two functions look more similar.)

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20160818161718.27187-11-chris@chris-wilson.co.uk
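The fix is an ordering change: acquire and pin the backing pages before inspecting the cache domain, so that nothing dropping in via the shrinker can invalidate the answer between the check and the copy, and unwind through a single err_unpin label on failure. Below is a minimal userspace sketch of that ordering; every name in it (fake_object, obj_get_pages, prepare_read, and so on) is a hypothetical stand-in for illustration, not the real i915 API.

	/* Sketch of "pin first, then inspect"; all names hypothetical. */
	#include <stdio.h>

	struct fake_object {
		int pages_pinned;	/* stands in for the pin refcount */
		int cache_coherent;	/* stands in for the domain check */
	};

	static int obj_get_pages(struct fake_object *obj)
	{
		return 0;	/* acquire backing storage; may fail */
	}

	static void obj_pin_pages(struct fake_object *obj)   { obj->pages_pinned++; }
	static void obj_unpin_pages(struct fake_object *obj) { obj->pages_pinned--; }

	static int obj_set_to_cpu_domain(struct fake_object *obj)
	{
		return 0;	/* may fail, e.g. interrupted GPU wait */
	}

	static int prepare_read(struct fake_object *obj, unsigned *needs_clflush)
	{
		int ret;

		/* Pin BEFORE looking at the cache domain: once pinned,
		 * the shrinker can no longer evict the object and
		 * invalidate what we are about to inspect. */
		ret = obj_get_pages(obj);
		if (ret)
			return ret;
		obj_pin_pages(obj);

		*needs_clflush = !obj->cache_coherent;
		if (*needs_clflush) {
			ret = obj_set_to_cpu_domain(obj);
			if (ret)
				goto err_unpin;
			*needs_clflush = 0;
		}

		/* success: return with the pages pinned */
		return 0;

	err_unpin:
		obj_unpin_pages(obj);
		return ret;
	}

	int main(void)
	{
		struct fake_object obj = { .cache_coherent = 1 };
		unsigned needs_clflush;

		if (prepare_read(&obj, &needs_clflush) == 0) {
			printf("pinned=%d needs_clflush=%u\n",
			       obj.pages_pinned, needs_clflush);
			obj_unpin_pages(&obj);	/* caller-side unpin */
		}
		return 0;
	}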
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c	48
1 file changed, 28 insertions(+), 20 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 4c9ecab765f1..c5d5dfe3e0ef 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -622,6 +622,12 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
 	if (ret)
 		return ret;
 
+	ret = i915_gem_object_get_pages(obj);
+	if (ret)
+		return ret;
+
+	i915_gem_object_pin_pages(obj);
+
 	i915_gem_object_flush_gtt_write_domain(obj);
 
 	/* If we're not in the cpu read domain, set ourself into the gtt
@@ -633,22 +639,20 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
 		*needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
 							obj->cache_level);
 
-	ret = i915_gem_object_get_pages(obj);
-	if (ret)
-		return ret;
-
-	i915_gem_object_pin_pages(obj);
-
 	if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
 		ret = i915_gem_object_set_to_cpu_domain(obj, false);
-		if (ret) {
-			i915_gem_object_unpin_pages(obj);
-			return ret;
-		}
+		if (ret)
+			goto err_unpin;
+
 		*needs_clflush = 0;
 	}
 
+	/* return with the pages pinned */
 	return 0;
+
+err_unpin:
+	i915_gem_object_unpin_pages(obj);
+	return ret;
 }
 
 int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
@@ -664,6 +668,12 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
 	if (ret)
 		return ret;
 
+	ret = i915_gem_object_get_pages(obj);
+	if (ret)
+		return ret;
+
+	i915_gem_object_pin_pages(obj);
+
 	i915_gem_object_flush_gtt_write_domain(obj);
 
 	/* If we're not in the cpu write domain, set ourself into the
@@ -681,18 +691,11 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
 		*needs_clflush |= !cpu_cache_is_coherent(obj->base.dev,
 							 obj->cache_level);
 
-	ret = i915_gem_object_get_pages(obj);
-	if (ret)
-		return ret;
-
-	i915_gem_object_pin_pages(obj);
-
 	if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
 		ret = i915_gem_object_set_to_cpu_domain(obj, true);
-		if (ret) {
-			i915_gem_object_unpin_pages(obj);
-			return ret;
-		}
+		if (ret)
+			goto err_unpin;
+
 		*needs_clflush = 0;
 	}
 
@@ -701,7 +704,12 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
 
 	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
 	obj->dirty = 1;
+	/* return with the pages pinned */
 	return 0;
+
+err_unpin:
+	i915_gem_object_unpin_pages(obj);
+	return ret;
 }
 
 /* Per-page copy function for the shmem pread fastpath.
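
Both helpers now return with the pages pinned on success (see the new "return with the pages pinned" comments above), which implies the caller owns the matching unpin. A sketch of the assumed caller-side contract, with the surrounding copy loop elided:

	unsigned int needs_clflush;
	int ret;

	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
	if (ret)
		return ret;		/* nothing left pinned on failure */

	/* ... read from the object's backing pages, clflushing first
	 * when needs_clflush is set ... */

	i915_gem_object_unpin_pages(obj);	/* balance the pin above */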