about summary refs log tree commit diff stats
path: root/drivers/gpu/drm/omapdrm/omap_gem.c
diff options
context:
space:
mode:
author: Laurent Pinchart <laurent.pinchart@ideasonboard.com> 2018-05-26 12:54:33 -0400
committer: Tomi Valkeinen <tomi.valkeinen@ti.com> 2018-06-28 06:41:05 -0400
commit: 3cbd0c587b129beaefb1405bbe43831e6bc9461e (patch)
tree: d31a45987312997a69db3959a37b09ac06f0952c /drivers/gpu/drm/omapdrm/omap_gem.c
parent: dc8c9aeee5098688c1085691213fb9a703bf20ad (diff)
drm/omap: gem: Replace struct_mutex usage with omap_obj private lock
The DRM device struct_mutex is used to protect against concurrent GEM object operations that deal with memory allocation and pinning. All those operations are local to a GEM object and don't need to be serialized across different GEM objects. Replace the struct_mutex with a local omap_obj.lock or drop it altogether where not needed.

Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Reviewed-by: Tomi Valkeinen <tomi.valkeinen@ti.com>
Signed-off-by: Tomi Valkeinen <tomi.valkeinen@ti.com>
Diffstat (limited to 'drivers/gpu/drm/omapdrm/omap_gem.c')
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.c127
1 file changed, 85 insertions(+), 42 deletions(-)
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 623856d9b85a..cebbdf081e5d 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -47,6 +47,9 @@ struct omap_gem_object {
47 /** roll applied when mapping to DMM */ 47 /** roll applied when mapping to DMM */
48 u32 roll; 48 u32 roll;
49 49
50 /** protects dma_addr_cnt, block, pages, dma_addrs and vaddr */
51 struct mutex lock;
52
50 /** 53 /**
51 * dma_addr contains the buffer DMA address. It is valid for 54 * dma_addr contains the buffer DMA address. It is valid for
52 * 55 *
@@ -220,7 +223,10 @@ static void omap_gem_evict(struct drm_gem_object *obj)
220 * Page Management 223 * Page Management
221 */ 224 */
222 225
223/* Ensure backing pages are allocated. */ 226/*
227 * Ensure backing pages are allocated. Must be called with the omap_obj.lock
228 * held.
229 */
224static int omap_gem_attach_pages(struct drm_gem_object *obj) 230static int omap_gem_attach_pages(struct drm_gem_object *obj)
225{ 231{
226 struct drm_device *dev = obj->dev; 232 struct drm_device *dev = obj->dev;
@@ -230,6 +236,8 @@ static int omap_gem_attach_pages(struct drm_gem_object *obj)
230 int i, ret; 236 int i, ret;
231 dma_addr_t *addrs; 237 dma_addr_t *addrs;
232 238
239 lockdep_assert_held(&omap_obj->lock);
240
233 /* 241 /*
234 * If not using shmem (in which case backing pages don't need to be 242 * If not using shmem (in which case backing pages don't need to be
235 * allocated) or if pages are already allocated we're done. 243 * allocated) or if pages are already allocated we're done.
@@ -291,13 +299,15 @@ free_pages:
291 return ret; 299 return ret;
292} 300}
293 301
294/** release backing pages */ 302/* Release backing pages. Must be called with the omap_obj.lock held. */
295static void omap_gem_detach_pages(struct drm_gem_object *obj) 303static void omap_gem_detach_pages(struct drm_gem_object *obj)
296{ 304{
297 struct omap_gem_object *omap_obj = to_omap_bo(obj); 305 struct omap_gem_object *omap_obj = to_omap_bo(obj);
298 unsigned int npages = obj->size >> PAGE_SHIFT; 306 unsigned int npages = obj->size >> PAGE_SHIFT;
299 unsigned int i; 307 unsigned int i;
300 308
309 lockdep_assert_held(&omap_obj->lock);
310
301 for (i = 0; i < npages; i++) { 311 for (i = 0; i < npages; i++) {
302 if (omap_obj->dma_addrs[i]) 312 if (omap_obj->dma_addrs[i])
303 dma_unmap_page(obj->dev->dev, omap_obj->dma_addrs[i], 313 dma_unmap_page(obj->dev->dev, omap_obj->dma_addrs[i],
@@ -491,14 +501,13 @@ vm_fault_t omap_gem_fault(struct vm_fault *vmf)
491 struct vm_area_struct *vma = vmf->vma; 501 struct vm_area_struct *vma = vmf->vma;
492 struct drm_gem_object *obj = vma->vm_private_data; 502 struct drm_gem_object *obj = vma->vm_private_data;
493 struct omap_gem_object *omap_obj = to_omap_bo(obj); 503 struct omap_gem_object *omap_obj = to_omap_bo(obj);
494 struct drm_device *dev = obj->dev;
495 int err; 504 int err;
496 vm_fault_t ret; 505 vm_fault_t ret;
497 506
498 /* Make sure we don't parallel update on a fault, nor move or remove 507 /* Make sure we don't parallel update on a fault, nor move or remove
499 * something from beneath our feet 508 * something from beneath our feet
500 */ 509 */
501 mutex_lock(&dev->struct_mutex); 510 mutex_lock(&omap_obj->lock);
502 511
503 /* if a shmem backed object, make sure we have pages attached now */ 512 /* if a shmem backed object, make sure we have pages attached now */
504 err = omap_gem_attach_pages(obj); 513 err = omap_gem_attach_pages(obj);
@@ -520,7 +529,7 @@ vm_fault_t omap_gem_fault(struct vm_fault *vmf)
520 529
521 530
522fail: 531fail:
523 mutex_unlock(&dev->struct_mutex); 532 mutex_unlock(&omap_obj->lock);
524 return ret; 533 return ret;
525} 534}
526 535
@@ -654,7 +663,7 @@ int omap_gem_roll(struct drm_gem_object *obj, u32 roll)
654 663
655 omap_obj->roll = roll; 664 omap_obj->roll = roll;
656 665
657 mutex_lock(&obj->dev->struct_mutex); 666 mutex_lock(&omap_obj->lock);
658 667
659 /* if we aren't mapped yet, we don't need to do anything */ 668 /* if we aren't mapped yet, we don't need to do anything */
660 if (omap_obj->block) { 669 if (omap_obj->block) {
@@ -669,7 +678,7 @@ int omap_gem_roll(struct drm_gem_object *obj, u32 roll)
669 } 678 }
670 679
671fail: 680fail:
672 mutex_unlock(&obj->dev->struct_mutex); 681 mutex_unlock(&omap_obj->lock);
673 682
674 return ret; 683 return ret;
675} 684}
@@ -770,7 +779,7 @@ int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
770 struct omap_gem_object *omap_obj = to_omap_bo(obj); 779 struct omap_gem_object *omap_obj = to_omap_bo(obj);
771 int ret = 0; 780 int ret = 0;
772 781
773 mutex_lock(&obj->dev->struct_mutex); 782 mutex_lock(&omap_obj->lock);
774 783
775 if (!omap_gem_is_contiguous(omap_obj) && priv->has_dmm) { 784 if (!omap_gem_is_contiguous(omap_obj) && priv->has_dmm) {
776 if (omap_obj->dma_addr_cnt == 0) { 785 if (omap_obj->dma_addr_cnt == 0) {
@@ -826,7 +835,7 @@ int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
826 } 835 }
827 836
828fail: 837fail:
829 mutex_unlock(&obj->dev->struct_mutex); 838 mutex_unlock(&omap_obj->lock);
830 839
831 return ret; 840 return ret;
832} 841}
@@ -844,7 +853,8 @@ void omap_gem_unpin(struct drm_gem_object *obj)
844 struct omap_gem_object *omap_obj = to_omap_bo(obj); 853 struct omap_gem_object *omap_obj = to_omap_bo(obj);
845 int ret; 854 int ret;
846 855
847 mutex_lock(&obj->dev->struct_mutex); 856 mutex_lock(&omap_obj->lock);
857
848 if (omap_obj->dma_addr_cnt > 0) { 858 if (omap_obj->dma_addr_cnt > 0) {
849 omap_obj->dma_addr_cnt--; 859 omap_obj->dma_addr_cnt--;
850 if (omap_obj->dma_addr_cnt == 0) { 860 if (omap_obj->dma_addr_cnt == 0) {
@@ -863,7 +873,7 @@ void omap_gem_unpin(struct drm_gem_object *obj)
863 } 873 }
864 } 874 }
865 875
866 mutex_unlock(&obj->dev->struct_mutex); 876 mutex_unlock(&omap_obj->lock);
867} 877}
868 878
869/* Get rotated scanout address (only valid if already pinned), at the 879/* Get rotated scanout address (only valid if already pinned), at the
@@ -876,13 +886,16 @@ int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, u32 orient,
876 struct omap_gem_object *omap_obj = to_omap_bo(obj); 886 struct omap_gem_object *omap_obj = to_omap_bo(obj);
877 int ret = -EINVAL; 887 int ret = -EINVAL;
878 888
879 mutex_lock(&obj->dev->struct_mutex); 889 mutex_lock(&omap_obj->lock);
890
880 if ((omap_obj->dma_addr_cnt > 0) && omap_obj->block && 891 if ((omap_obj->dma_addr_cnt > 0) && omap_obj->block &&
881 (omap_obj->flags & OMAP_BO_TILED)) { 892 (omap_obj->flags & OMAP_BO_TILED)) {
882 *dma_addr = tiler_tsptr(omap_obj->block, orient, x, y); 893 *dma_addr = tiler_tsptr(omap_obj->block, orient, x, y);
883 ret = 0; 894 ret = 0;
884 } 895 }
885 mutex_unlock(&obj->dev->struct_mutex); 896
897 mutex_unlock(&omap_obj->lock);
898
886 return ret; 899 return ret;
887} 900}
888 901
@@ -910,18 +923,26 @@ int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
910 bool remap) 923 bool remap)
911{ 924{
912 struct omap_gem_object *omap_obj = to_omap_bo(obj); 925 struct omap_gem_object *omap_obj = to_omap_bo(obj);
913 int ret; 926 int ret = 0;
914 927
915 if (!remap) { 928 mutex_lock(&omap_obj->lock);
916 if (!omap_obj->pages) 929
917 return -ENOMEM; 930 if (remap) {
918 *pages = omap_obj->pages; 931 ret = omap_gem_attach_pages(obj);
919 return 0; 932 if (ret)
933 goto unlock;
920 } 934 }
921 mutex_lock(&obj->dev->struct_mutex); 935
922 ret = omap_gem_attach_pages(obj); 936 if (!omap_obj->pages) {
937 ret = -ENOMEM;
938 goto unlock;
939 }
940
923 *pages = omap_obj->pages; 941 *pages = omap_obj->pages;
924 mutex_unlock(&obj->dev->struct_mutex); 942
943unlock:
944 mutex_unlock(&omap_obj->lock);
945
925 return ret; 946 return ret;
926} 947}
927 948
@@ -936,24 +957,34 @@ int omap_gem_put_pages(struct drm_gem_object *obj)
936} 957}
937 958
938#ifdef CONFIG_DRM_FBDEV_EMULATION 959#ifdef CONFIG_DRM_FBDEV_EMULATION
939/* Get kernel virtual address for CPU access.. this more or less only 960/*
940 * exists for omap_fbdev. This should be called with struct_mutex 961 * Get kernel virtual address for CPU access.. this more or less only
941 * held. 962 * exists for omap_fbdev.
942 */ 963 */
943void *omap_gem_vaddr(struct drm_gem_object *obj) 964void *omap_gem_vaddr(struct drm_gem_object *obj)
944{ 965{
945 struct omap_gem_object *omap_obj = to_omap_bo(obj); 966 struct omap_gem_object *omap_obj = to_omap_bo(obj);
946 WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex)); 967 void *vaddr;
947 if (!omap_obj->vaddr) { 968 int ret;
948 int ret; 969
970 mutex_lock(&omap_obj->lock);
949 971
972 if (!omap_obj->vaddr) {
950 ret = omap_gem_attach_pages(obj); 973 ret = omap_gem_attach_pages(obj);
951 if (ret) 974 if (ret) {
952 return ERR_PTR(ret); 975 vaddr = ERR_PTR(ret);
976 goto unlock;
977 }
978
953 omap_obj->vaddr = vmap(omap_obj->pages, obj->size >> PAGE_SHIFT, 979 omap_obj->vaddr = vmap(omap_obj->pages, obj->size >> PAGE_SHIFT,
954 VM_MAP, pgprot_writecombine(PAGE_KERNEL)); 980 VM_MAP, pgprot_writecombine(PAGE_KERNEL));
955 } 981 }
956 return omap_obj->vaddr; 982
983 vaddr = omap_obj->vaddr;
984
985unlock:
986 mutex_unlock(&omap_obj->lock);
987 return vaddr;
957} 988}
958#endif 989#endif
959 990
@@ -1001,6 +1032,8 @@ void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
1001 1032
1002 off = drm_vma_node_start(&obj->vma_node); 1033 off = drm_vma_node_start(&obj->vma_node);
1003 1034
1035 mutex_lock(&omap_obj->lock);
1036
1004 seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d", 1037 seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
1005 omap_obj->flags, obj->name, kref_read(&obj->refcount), 1038 omap_obj->flags, obj->name, kref_read(&obj->refcount),
1006 off, &omap_obj->dma_addr, omap_obj->dma_addr_cnt, 1039 off, &omap_obj->dma_addr, omap_obj->dma_addr_cnt,
@@ -1018,6 +1051,8 @@ void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
1018 seq_printf(m, " %zu", obj->size); 1051 seq_printf(m, " %zu", obj->size);
1019 } 1052 }
1020 1053
1054 mutex_unlock(&omap_obj->lock);
1055
1021 seq_printf(m, "\n"); 1056 seq_printf(m, "\n");
1022} 1057}
1023 1058
@@ -1051,15 +1086,19 @@ void omap_gem_free_object(struct drm_gem_object *obj)
1051 1086
1052 omap_gem_evict(obj); 1087 omap_gem_evict(obj);
1053 1088
1054 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
1055
1056 spin_lock(&priv->list_lock); 1089 spin_lock(&priv->list_lock);
1057 list_del(&omap_obj->mm_list); 1090 list_del(&omap_obj->mm_list);
1058 spin_unlock(&priv->list_lock); 1091 spin_unlock(&priv->list_lock);
1059 1092
1060 /* this means the object is still pinned.. which really should 1093 /*
1061 * not happen. I think.. 1094 * We own the sole reference to the object at this point, but to keep
1095 * lockdep happy, we must still take the omap_obj_lock to call
1096 * omap_gem_detach_pages(). This should hardly make any difference as
1097 * there can't be any lock contention.
1062 */ 1098 */
1099 mutex_lock(&omap_obj->lock);
1100
1101 /* The object should not be pinned. */
1063 WARN_ON(omap_obj->dma_addr_cnt > 0); 1102 WARN_ON(omap_obj->dma_addr_cnt > 0);
1064 1103
1065 if (omap_obj->pages) { 1104 if (omap_obj->pages) {
@@ -1078,8 +1117,12 @@ void omap_gem_free_object(struct drm_gem_object *obj)
1078 drm_prime_gem_destroy(obj, omap_obj->sgt); 1117 drm_prime_gem_destroy(obj, omap_obj->sgt);
1079 } 1118 }
1080 1119
1120 mutex_unlock(&omap_obj->lock);
1121
1081 drm_gem_object_release(obj); 1122 drm_gem_object_release(obj);
1082 1123
1124 mutex_destroy(&omap_obj->lock);
1125
1083 kfree(omap_obj); 1126 kfree(omap_obj);
1084} 1127}
1085 1128
@@ -1135,6 +1178,7 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
1135 1178
1136 obj = &omap_obj->base; 1179 obj = &omap_obj->base;
1137 omap_obj->flags = flags; 1180 omap_obj->flags = flags;
1181 mutex_init(&omap_obj->lock);
1138 1182
1139 if (flags & OMAP_BO_TILED) { 1183 if (flags & OMAP_BO_TILED) {
1140 /* 1184 /*
@@ -1199,16 +1243,15 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
1199 if (sgt->orig_nents != 1 && !priv->has_dmm) 1243 if (sgt->orig_nents != 1 && !priv->has_dmm)
1200 return ERR_PTR(-EINVAL); 1244 return ERR_PTR(-EINVAL);
1201 1245
1202 mutex_lock(&dev->struct_mutex);
1203
1204 gsize.bytes = PAGE_ALIGN(size); 1246 gsize.bytes = PAGE_ALIGN(size);
1205 obj = omap_gem_new(dev, gsize, OMAP_BO_MEM_DMABUF | OMAP_BO_WC); 1247 obj = omap_gem_new(dev, gsize, OMAP_BO_MEM_DMABUF | OMAP_BO_WC);
1206 if (!obj) { 1248 if (!obj)
1207 obj = ERR_PTR(-ENOMEM); 1249 return ERR_PTR(-ENOMEM);
1208 goto done;
1209 }
1210 1250
1211 omap_obj = to_omap_bo(obj); 1251 omap_obj = to_omap_bo(obj);
1252
1253 mutex_lock(&omap_obj->lock);
1254
1212 omap_obj->sgt = sgt; 1255 omap_obj->sgt = sgt;
1213 1256
1214 if (sgt->orig_nents == 1) { 1257 if (sgt->orig_nents == 1) {
@@ -1244,7 +1287,7 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
1244 } 1287 }
1245 1288
1246done: 1289done:
1247 mutex_unlock(&dev->struct_mutex); 1290 mutex_unlock(&omap_obj->lock);
1248 return obj; 1291 return obj;
1249} 1292}
1250 1293