author		Chris Wilson <chris@chris-wilson.co.uk>	2010-10-14 10:03:58 -0400
committer	Chris Wilson <chris@chris-wilson.co.uk>	2010-10-19 04:19:47 -0400
commit		fbd5a26d500c7cd8943cc5f37ccc7e49cf386053 (patch)
tree		28d8aa6e9d9818589f2abee0ca1f798c3cd63255 /drivers
parent		b5e4feb6615fe07150f05bb0e0ccc0ff9138b9ec (diff)
drm/i915: Rearrange acquisition of mutex during pwrite
... to avoid reacquiring it to drop the object reference count on exit. Note we have to make sure we now drop (and reacquire) the lock around acquiring the mm semaphore on the slow paths.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
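The effect is easiest to see in the rewritten ioctl: struct_mutex is taken exactly once, up front, and released exactly once at the end, with the object reference dropped while the lock is still held. Below is a condensed, illustrative sketch of the new control flow — not the verbatim function (the bounds check, the phys-object case, and the pin/flush details are elided; see the full diff that follows):

	int
	i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file)
	{
		struct drm_i915_gem_pwrite *args = data;
		struct drm_gem_object *obj;
		int ret;

		obj = drm_gem_object_lookup(dev, file, args->handle);
		if (obj == NULL)
			return -ENOENT;

		/* Single acquisition for the whole ioctl. */
		ret = i915_mutex_lock_interruptible(dev);
		if (ret) {
			drm_gem_object_unreference_unlocked(obj);
			return ret;
		}

		/* ... pick the GTT or shmem path; try the fast write and
		 * fall back to the slow one on -EFAULT.  The slow paths
		 * drop and reacquire struct_mutex around get_user_pages().
		 */

		/* Reference dropped under the lock: no relock on exit. */
		drm_gem_object_unreference(obj);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}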
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c	170
1 file changed, 65 insertions(+), 105 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index b44c09ab8928..1177ff577914 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -635,9 +635,7 @@ fast_user_write(struct io_mapping *mapping,
 	unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
 						      user_data, length);
 	io_mapping_unmap_atomic(vaddr_atomic, KM_USER0);
-	if (unwritten)
-		return -EFAULT;
-	return 0;
+	return unwritten;
 }
 
 /* Here's the write path which can sleep for
@@ -670,14 +668,14 @@ fast_shmem_write(struct page **pages,
 			 char __user *data,
 			 int length)
 {
-	int unwritten;
 	char *vaddr;
+	int ret;
 
 	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
-	unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length);
+	ret = __copy_from_user_inatomic(vaddr + page_offset, data, length);
 	kunmap_atomic(vaddr, KM_USER0);
 
-	return unwritten ? -EFAULT : 0;
+	return ret;
 }
 
 /**
@@ -695,24 +693,10 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
 	loff_t offset, page_base;
 	char __user *user_data;
 	int page_offset, page_length;
-	int ret;
 
 	user_data = (char __user *) (uintptr_t) args->data_ptr;
 	remain = args->size;
 
-	ret = i915_mutex_lock_interruptible(dev);
-	if (ret)
-		return ret;
-
-	ret = i915_gem_object_pin(obj, 0);
-	if (ret) {
-		mutex_unlock(&dev->struct_mutex);
-		return ret;
-	}
-	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
-	if (ret)
-		goto fail;
-
 	obj_priv = to_intel_bo(obj);
 	offset = obj_priv->gtt_offset + args->offset;
 
@@ -729,26 +713,21 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
 		if ((page_offset + remain) > PAGE_SIZE)
 			page_length = PAGE_SIZE - page_offset;
 
-		ret = fast_user_write (dev_priv->mm.gtt_mapping, page_base,
-				       page_offset, user_data, page_length);
-
 		/* If we get a fault while copying data, then (presumably) our
 		 * source page isn't available.  Return the error and we'll
 		 * retry in the slow path.
 		 */
-		if (ret)
-			goto fail;
+		if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
+				    page_offset, user_data, page_length))
+
+			return -EFAULT;
 
 		remain -= page_length;
 		user_data += page_length;
 		offset += page_length;
 	}
 
-fail:
-	i915_gem_object_unpin(obj);
-	mutex_unlock(&dev->struct_mutex);
-
-	return ret;
+	return 0;
 }
 
 /**
@@ -785,30 +764,24 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
 	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
 	num_pages = last_data_page - first_data_page + 1;
 
-	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
+	user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
 	if (user_pages == NULL)
 		return -ENOMEM;
 
+	mutex_unlock(&dev->struct_mutex);
 	down_read(&mm->mmap_sem);
 	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
 				      num_pages, 0, 0, user_pages, NULL);
 	up_read(&mm->mmap_sem);
+	mutex_lock(&dev->struct_mutex);
 	if (pinned_pages < num_pages) {
 		ret = -EFAULT;
 		goto out_unpin_pages;
 	}
 
-	ret = i915_mutex_lock_interruptible(dev);
-	if (ret)
-		goto out_unpin_pages;
-
-	ret = i915_gem_object_pin(obj, 0);
-	if (ret)
-		goto out_unlock;
-
 	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
 	if (ret)
-		goto out_unpin_object;
+		goto out_unpin_pages;
 
 	obj_priv = to_intel_bo(obj);
 	offset = obj_priv->gtt_offset + args->offset;
@@ -844,10 +817,6 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
 		data_ptr += page_length;
 	}
 
-out_unpin_object:
-	i915_gem_object_unpin(obj);
-out_unlock:
-	mutex_unlock(&dev->struct_mutex);
 out_unpin_pages:
 	for (i = 0; i < pinned_pages; i++)
 		page_cache_release(user_pages[i]);
@@ -870,23 +839,10 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
 	loff_t offset, page_base;
 	char __user *user_data;
 	int page_offset, page_length;
-	int ret;
 
 	user_data = (char __user *) (uintptr_t) args->data_ptr;
 	remain = args->size;
 
-	ret = i915_mutex_lock_interruptible(dev);
-	if (ret)
-		return ret;
-
-	ret = i915_gem_object_get_pages(obj, 0);
-	if (ret != 0)
-		goto fail_unlock;
-
-	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
-	if (ret != 0)
-		goto fail_put_pages;
-
 	obj_priv = to_intel_bo(obj);
 	offset = args->offset;
 	obj_priv->dirty = 1;
@@ -904,23 +860,17 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
 		if ((page_offset + remain) > PAGE_SIZE)
 			page_length = PAGE_SIZE - page_offset;
 
-		ret = fast_shmem_write(obj_priv->pages,
-				       page_base, page_offset,
-				       user_data, page_length);
-		if (ret)
-			goto fail_put_pages;
+		if (fast_shmem_write(obj_priv->pages,
+				     page_base, page_offset,
+				     user_data, page_length))
+			return -EFAULT;
 
 		remain -= page_length;
 		user_data += page_length;
 		offset += page_length;
 	}
 
-fail_put_pages:
-	i915_gem_object_put_pages(obj);
-fail_unlock:
-	mutex_unlock(&dev->struct_mutex);
-
-	return ret;
+	return 0;
 }
 
 /**
@@ -962,28 +912,22 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
 	if (user_pages == NULL)
 		return -ENOMEM;
 
+	mutex_unlock(&dev->struct_mutex);
 	down_read(&mm->mmap_sem);
 	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
 				      num_pages, 0, 0, user_pages, NULL);
 	up_read(&mm->mmap_sem);
+	mutex_lock(&dev->struct_mutex);
 	if (pinned_pages < num_pages) {
 		ret = -EFAULT;
-		goto fail_put_user_pages;
+		goto out;
 	}
 
-	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
-
-	ret = i915_mutex_lock_interruptible(dev);
-	if (ret)
-		goto fail_put_user_pages;
-
-	ret = i915_gem_object_get_pages_or_evict(obj);
+	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
 	if (ret)
-		goto fail_unlock;
+		goto out;
 
-	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
-	if (ret != 0)
-		goto fail_put_pages;
+	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
 
 	obj_priv = to_intel_bo(obj);
 	offset = args->offset;
@@ -1029,11 +973,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
 		offset += page_length;
 	}
 
-fail_put_pages:
-	i915_gem_object_put_pages(obj);
-fail_unlock:
-	mutex_unlock(&dev->struct_mutex);
-fail_put_user_pages:
+out:
 	for (i = 0; i < pinned_pages; i++)
 		page_cache_release(user_pages[i]);
 	drm_free_large(user_pages);
@@ -1048,18 +988,24 @@ fail_put_user_pages:
  */
 int
 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
-		      struct drm_file *file_priv)
+		      struct drm_file *file)
 {
 	struct drm_i915_gem_pwrite *args = data;
 	struct drm_gem_object *obj;
 	struct drm_i915_gem_object *obj_priv;
 	int ret = 0;
 
-	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	obj = drm_gem_object_lookup(dev, file, args->handle);
 	if (obj == NULL)
 		return -ENOENT;
 	obj_priv = to_intel_bo(obj);
 
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret) {
+		drm_gem_object_unreference_unlocked(obj);
+		return ret;
+	}
+
 	/* Bounds check destination. */
 	if (args->offset > obj->size || args->size > obj->size - args->offset) {
 		ret = -EINVAL;
@@ -1090,32 +1036,46 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 	 * perspective, requiring manual detiling by the client.
 	 */
 	if (obj_priv->phys_obj)
-		ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
+		ret = i915_gem_phys_pwrite(dev, obj, args, file);
 	else if (obj_priv->tiling_mode == I915_TILING_NONE &&
 		 obj_priv->gtt_space &&
 		 obj->write_domain != I915_GEM_DOMAIN_CPU) {
-		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
-		if (ret == -EFAULT) {
-			ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
-						       file_priv);
-		}
-	} else if (i915_gem_object_needs_bit17_swizzle(obj)) {
-		ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv);
+		ret = i915_gem_object_pin(obj, 0);
+		if (ret)
+			goto out;
+
+		ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+		if (ret)
+			goto out_unpin;
+
+		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
+		if (ret == -EFAULT)
+			ret = i915_gem_gtt_pwrite_slow(dev, obj, args, file);
+
+out_unpin:
+		i915_gem_object_unpin(obj);
 	} else {
-		ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
-		if (ret == -EFAULT) {
-			ret = i915_gem_shmem_pwrite_slow(dev, obj, args,
-							 file_priv);
-		}
-	}
+		ret = i915_gem_object_get_pages_or_evict(obj);
+		if (ret)
+			goto out;
 
-#if WATCH_PWRITE
-	if (ret)
-		DRM_INFO("pwrite failed %d\n", ret);
-#endif
+		ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+		if (ret)
+			goto out_put;
+
+		ret = -EFAULT;
+		if (!i915_gem_object_needs_bit17_swizzle(obj))
+			ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
+		if (ret == -EFAULT)
+			ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file);
+
+out_put:
+		i915_gem_object_put_pages(obj);
+	}
 
 out:
-	drm_gem_object_unreference_unlocked(obj);
+	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
 	return ret;
 }
 
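The caveat in the commit message about the mm semaphore corresponds to the pattern both slow paths now share: get_user_pages() needs mm->mmap_sem, and the GEM fault handler runs under mmap_sem before taking struct_mutex, so holding struct_mutex across down_read() would invert that lock order. A minimal sketch of the dance, with the names used in the hunks above:

	/* struct_mutex is already held on entry (taken once by the ioctl),
	 * so drop it before mmap_sem and retake it afterwards, preserving
	 * the mmap_sem -> struct_mutex ordering used by the fault path.
	 */
	mutex_unlock(&dev->struct_mutex);
	down_read(&mm->mmap_sem);
	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
				      num_pages, 0, 0, user_pages, NULL);
	up_read(&mm->mmap_sem);
	mutex_lock(&dev->struct_mutex);

Consistent with this, the domain flush (set_to_gtt_domain/set_to_cpu_domain) is performed only after the mutex is reacquired, so it cannot be stale by the time the copy runs.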