Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/drm_fb_cma_helper.c   16
-rw-r--r--  drivers/gpu/drm/drm_gem.c             44
-rw-r--r--  drivers/gpu/drm/drm_gem_cma_helper.c  10
-rw-r--r--  drivers/gpu/drm/drm_prime.c           10
4 files changed, 40 insertions, 40 deletions
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index 920bb5764cd6..be6d90664e50 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -102,7 +102,7 @@ void drm_fb_cma_destroy(struct drm_framebuffer *fb)
 
 	for (i = 0; i < 4; i++) {
 		if (fb_cma->obj[i])
-			drm_gem_object_unreference_unlocked(&fb_cma->obj[i]->base);
+			drm_gem_object_put_unlocked(&fb_cma->obj[i]->base);
 	}
 
 	drm_framebuffer_cleanup(fb);
@@ -190,7 +190,7 @@ struct drm_framebuffer *drm_fb_cma_create_with_funcs(struct drm_device *dev,
 		if (!obj) {
 			dev_err(dev->dev, "Failed to lookup GEM object\n");
 			ret = -ENXIO;
-			goto err_gem_object_unreference;
+			goto err_gem_object_put;
 		}
 
 		min_size = (height - 1) * mode_cmd->pitches[i]
@@ -198,9 +198,9 @@ struct drm_framebuffer *drm_fb_cma_create_with_funcs(struct drm_device *dev,
 			 + mode_cmd->offsets[i];
 
 		if (obj->size < min_size) {
-			drm_gem_object_unreference_unlocked(obj);
+			drm_gem_object_put_unlocked(obj);
 			ret = -EINVAL;
-			goto err_gem_object_unreference;
+			goto err_gem_object_put;
 		}
 		objs[i] = to_drm_gem_cma_obj(obj);
 	}
@@ -208,14 +208,14 @@ struct drm_framebuffer *drm_fb_cma_create_with_funcs(struct drm_device *dev,
 	fb_cma = drm_fb_cma_alloc(dev, mode_cmd, objs, i, funcs);
 	if (IS_ERR(fb_cma)) {
 		ret = PTR_ERR(fb_cma);
-		goto err_gem_object_unreference;
+		goto err_gem_object_put;
 	}
 
 	return &fb_cma->fb;
 
-err_gem_object_unreference:
+err_gem_object_put:
 	for (i--; i >= 0; i--)
-		drm_gem_object_unreference_unlocked(&objs[i]->base);
+		drm_gem_object_put_unlocked(&objs[i]->base);
 	return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(drm_fb_cma_create_with_funcs);
@@ -477,7 +477,7 @@ err_cma_destroy:
 err_fb_info_destroy:
 	drm_fb_helper_fini(helper);
 err_gem_free_object:
-	drm_gem_object_unreference_unlocked(&obj->base);
+	drm_gem_object_put_unlocked(&obj->base);
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index bc93de308673..b1e28c944637 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -218,7 +218,7 @@ static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
 }
 
 static void
-drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
+drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
 	bool final = false;
@@ -241,7 +241,7 @@ drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
 	mutex_unlock(&dev->object_name_lock);
 
 	if (final)
-		drm_gem_object_unreference_unlocked(obj);
+		drm_gem_object_put_unlocked(obj);
 }
 
 /*
@@ -262,7 +262,7 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
 	if (dev->driver->gem_close_object)
 		dev->driver->gem_close_object(obj, file_priv);
 
-	drm_gem_object_handle_unreference_unlocked(obj);
+	drm_gem_object_handle_put_unlocked(obj);
 
 	return 0;
 }
@@ -352,7 +352,7 @@ drm_gem_handle_create_tail(struct drm_file *file_priv,
 
 	WARN_ON(!mutex_is_locked(&dev->object_name_lock));
 	if (obj->handle_count++ == 0)
-		drm_gem_object_reference(obj);
+		drm_gem_object_get(obj);
 
 	/*
 	 * Get the user-visible handle using idr. Preload and perform
@@ -392,7 +392,7 @@ err_remove:
 	idr_remove(&file_priv->object_idr, handle);
 	spin_unlock(&file_priv->table_lock);
 err_unref:
-	drm_gem_object_handle_unreference_unlocked(obj);
+	drm_gem_object_handle_put_unlocked(obj);
 	return ret;
 }
 
@@ -606,7 +606,7 @@ drm_gem_object_lookup(struct drm_file *filp, u32 handle)
 	/* Check if we currently have a reference on the object */
 	obj = idr_find(&filp->object_idr, handle);
 	if (obj)
-		drm_gem_object_reference(obj);
+		drm_gem_object_get(obj);
 
 	spin_unlock(&filp->table_lock);
 
@@ -683,7 +683,7 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
 
 err:
 	mutex_unlock(&dev->object_name_lock);
-	drm_gem_object_unreference_unlocked(obj);
+	drm_gem_object_put_unlocked(obj);
 	return ret;
 }
 
@@ -713,7 +713,7 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data,
 	mutex_lock(&dev->object_name_lock);
 	obj = idr_find(&dev->object_name_idr, (int) args->name);
 	if (obj) {
-		drm_gem_object_reference(obj);
+		drm_gem_object_get(obj);
 	} else {
 		mutex_unlock(&dev->object_name_lock);
 		return -ENOENT;
@@ -721,7 +721,7 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data,
 
 	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
 	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
-	drm_gem_object_unreference_unlocked(obj);
+	drm_gem_object_put_unlocked(obj);
 	if (ret)
 		return ret;
 
@@ -809,16 +809,16 @@ drm_gem_object_free(struct kref *kref)
 EXPORT_SYMBOL(drm_gem_object_free);
 
 /**
- * drm_gem_object_unreference_unlocked - release a GEM BO reference
+ * drm_gem_object_put_unlocked - drop a GEM buffer object reference
  * @obj: GEM buffer object
  *
  * This releases a reference to @obj. Callers must not hold the
  * &drm_device.struct_mutex lock when calling this function.
  *
- * See also __drm_gem_object_unreference().
+ * See also __drm_gem_object_put().
  */
 void
-drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
+drm_gem_object_put_unlocked(struct drm_gem_object *obj)
 {
 	struct drm_device *dev;
 
@@ -834,10 +834,10 @@ drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
 				&dev->struct_mutex))
 		mutex_unlock(&dev->struct_mutex);
 }
-EXPORT_SYMBOL(drm_gem_object_unreference_unlocked);
+EXPORT_SYMBOL(drm_gem_object_put_unlocked);
 
 /**
- * drm_gem_object_unreference - release a GEM BO reference
+ * drm_gem_object_put - release a GEM buffer object reference
  * @obj: GEM buffer object
  *
  * This releases a reference to @obj. Callers must hold the
@@ -845,10 +845,10 @@ EXPORT_SYMBOL(drm_gem_object_unreference_unlocked);
  * driver doesn't use &drm_device.struct_mutex for anything.
 *
 * For drivers not encumbered with legacy locking use
- * drm_gem_object_unreference_unlocked() instead.
+ * drm_gem_object_put_unlocked() instead.
 */
 void
-drm_gem_object_unreference(struct drm_gem_object *obj)
+drm_gem_object_put(struct drm_gem_object *obj)
 {
 	if (obj) {
 		WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
@@ -856,7 +856,7 @@ drm_gem_object_unreference(struct drm_gem_object *obj)
 		kref_put(&obj->refcount, drm_gem_object_free);
 	}
 }
-EXPORT_SYMBOL(drm_gem_object_unreference);
+EXPORT_SYMBOL(drm_gem_object_put);
 
 /**
  * drm_gem_vm_open - vma->ops->open implementation for GEM
@@ -869,7 +869,7 @@ void drm_gem_vm_open(struct vm_area_struct *vma)
 {
 	struct drm_gem_object *obj = vma->vm_private_data;
 
-	drm_gem_object_reference(obj);
+	drm_gem_object_get(obj);
 }
 EXPORT_SYMBOL(drm_gem_vm_open);
 
@@ -884,7 +884,7 @@ void drm_gem_vm_close(struct vm_area_struct *vma)
 {
 	struct drm_gem_object *obj = vma->vm_private_data;
 
-	drm_gem_object_unreference_unlocked(obj);
+	drm_gem_object_put_unlocked(obj);
 }
 EXPORT_SYMBOL(drm_gem_vm_close);
 
@@ -935,7 +935,7 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
 	 * (which should happen whether the vma was created by this call, or
 	 * by a vm_open due to mremap or partial unmap or whatever).
 	 */
-	drm_gem_object_reference(obj);
+	drm_gem_object_get(obj);
 
 	return 0;
 }
@@ -992,14 +992,14 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 		return -EINVAL;
 
 	if (!drm_vma_node_is_allowed(node, priv)) {
-		drm_gem_object_unreference_unlocked(obj);
+		drm_gem_object_put_unlocked(obj);
 		return -EACCES;
 	}
 
 	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
 			       vma);
 
-	drm_gem_object_unreference_unlocked(obj);
+	drm_gem_object_put_unlocked(obj);
 
 	return ret;
 }
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index 5cf38a474845..906984d4bec2 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -121,7 +121,7 @@ struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
 	return cma_obj;
 
 error:
-	drm_gem_object_unreference_unlocked(&cma_obj->base);
+	drm_gem_object_put_unlocked(&cma_obj->base);
 	return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(drm_gem_cma_create);
@@ -163,7 +163,7 @@ drm_gem_cma_create_with_handle(struct drm_file *file_priv,
 	 */
 	ret = drm_gem_handle_create(file_priv, gem_obj, handle);
 	/* drop reference from allocate - handle holds it now. */
-	drm_gem_object_unreference_unlocked(gem_obj);
+	drm_gem_object_put_unlocked(gem_obj);
 	if (ret)
 		return ERR_PTR(ret);
 
@@ -293,7 +293,7 @@ int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
 
 	*offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
 
-	drm_gem_object_unreference_unlocked(gem_obj);
+	drm_gem_object_put_unlocked(gem_obj);
 
 	return 0;
 }
@@ -416,13 +416,13 @@ unsigned long drm_gem_cma_get_unmapped_area(struct file *filp,
 		return -EINVAL;
 
 	if (!drm_vma_node_is_allowed(node, priv)) {
-		drm_gem_object_unreference_unlocked(obj);
+		drm_gem_object_put_unlocked(obj);
 		return -EACCES;
 	}
 
 	cma_obj = to_drm_gem_cma_obj(obj);
 
-	drm_gem_object_unreference_unlocked(obj);
+	drm_gem_object_put_unlocked(obj);
 
 	return cma_obj->vaddr ? (unsigned long)cma_obj->vaddr : -EINVAL;
 }
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 25aa4558f1b5..866b294e7c61 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -318,7 +318,7 @@ struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
 		return dma_buf;
 
 	drm_dev_ref(dev);
-	drm_gem_object_reference(exp_info->priv);
+	drm_gem_object_get(exp_info->priv);
 
 	return dma_buf;
 }
@@ -339,7 +339,7 @@ void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
 	struct drm_device *dev = obj->dev;
 
 	/* drop the reference on the export fd holds */
-	drm_gem_object_unreference_unlocked(obj);
+	drm_gem_object_put_unlocked(obj);
 
 	drm_dev_unref(dev);
 }
@@ -585,7 +585,7 @@ out_have_handle:
 fail_put_dmabuf:
 	dma_buf_put(dmabuf);
 out:
-	drm_gem_object_unreference_unlocked(obj);
+	drm_gem_object_put_unlocked(obj);
 out_unlock:
 	mutex_unlock(&file_priv->prime.lock);
 
@@ -616,7 +616,7 @@ struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
 			 * Importing dmabuf exported from out own gem increases
 			 * refcount on gem itself instead of f_count of dmabuf.
 			 */
-			drm_gem_object_reference(obj);
+			drm_gem_object_get(obj);
 			return obj;
 		}
 	}
@@ -704,7 +704,7 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev,
 
 	/* _handle_create_tail unconditionally unlocks dev->object_name_lock. */
 	ret = drm_gem_handle_create_tail(file_priv, obj, handle);
-	drm_gem_object_unreference_unlocked(obj);
+	drm_gem_object_put_unlocked(obj);
 	if (ret)
 		goto out_put;
 
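
Usage note (illustrative, not part of the patch): a minimal sketch of how driver code takes and drops GEM references with the renamed helpers. The wrapper example_touch_bo() and its error handling are hypothetical; drm_gem_object_lookup(), drm_gem_object_get() and drm_gem_object_put_unlocked() are the DRM core interfaces touched above.

#include <drm/drm_gem.h>

/* Hypothetical driver helper: look up a BO by handle, use it, drop the ref. */
static int example_touch_bo(struct drm_file *file_priv, u32 handle)
{
	struct drm_gem_object *obj;

	/* drm_gem_object_lookup() returns the object with a reference held. */
	obj = drm_gem_object_lookup(file_priv, handle);
	if (!obj)
		return -ENOENT;

	/*
	 * A second long-term owner would take its own reference here with
	 * drm_gem_object_get(obj) and drop it later with
	 * drm_gem_object_put_unlocked().
	 */

	/* ... use the object ... */

	/* Drop the lookup reference; the _unlocked variant needs no struct_mutex. */
	drm_gem_object_put_unlocked(obj);
	return 0;
}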