author		Dave Airlie <airlied@redhat.com>	2014-01-13 19:55:36 -0500
committer	Dave Airlie <airlied@redhat.com>	2014-01-13 19:55:36 -0500
commit		faf096ffba2b1a4066e6d6dcd1243cc5f3d1fb23 (patch)
tree		4793c625d9eaae10f453b8c4d32543de86875402
parent		a095c60bd06f204c98527aafd5fda6ef42b53eb5 (diff)
parent		94844cf06568d9592f985e4bd0b9d759a56043c6 (diff)
Merge tag 'vmwgfx-next-2014-01-13' of git://people.freedesktop.org/~thomash/linux into drm-next
Anyway, nothing big here. Three more code cleanup patches from Rashika Kheria, and one TTM/vmwgfx patch from me that tightens security around TTM objects enough for them to be opened using prime objects from render nodes: previously any client could access a shared buffer using the "name", also without actually opening it. Now a reference is required, and for render nodes such a reference is intended to only be obtainable using a prime fd.

vmwgfx-next 2014-01-13 pull request

* tag 'vmwgfx-next-2014-01-13' of git://people.freedesktop.org/~thomash/linux:
  drivers: gpu: Mark functions as static in vmwgfx_fence.c
  drivers: gpu: Mark functions as static in vmwgfx_buffer.c
  drivers: gpu: Mark functions as static in vmwgfx_kms.c
  drm/ttm: ttm object security fixes for render nodes
-rw-r--r--	drivers/gpu/drm/ttm/ttm_object.c	| 90
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c	|  8
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_fence.c	|  9
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_kms.c	| 12
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_surface.c	|  3
-rw-r--r--	include/drm/ttm/ttm_object.h	| 18
6 files changed, 87 insertions(+), 53 deletions(-)
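
The security change in the drm/ttm patch is easiest to see from the caller's side: ttm_base_object_lookup() now only finds objects the calling file already holds a usage reference on, while the new ttm_base_object_lookup_for_ref() looks up device-wide, and the shareable check is enforced when the reference is actually added via ttm_ref_object_add(). A minimal, hypothetical caller sketch (vmw_ref_handle() is an illustrative name, not vmwgfx code, and it assumes ttm_ref_object_add()'s usual (tfile, base, ref_type, existed) signature) might look like this:

#include <drm/ttm/ttm_object.h>

/* Illustrative helper only; not part of this series. */
static int vmw_ref_handle(struct ttm_object_device *tdev,
			  struct ttm_object_file *tfile, uint32_t handle)
{
	struct ttm_base_object *base;
	int ret;

	/* Device-wide lookup; no longer performs the shareable check itself. */
	base = ttm_base_object_lookup_for_ref(tdev, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	/*
	 * The check moved here: ttm_ref_object_add() is expected to return
	 * -EPERM unless the object is shareable or owned by @tfile.
	 */
	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);

	/* Drop the temporary lookup reference whether or not the add succeeded. */
	ttm_base_object_unref(&base);
	return ret;
}

This is the pattern the vmwgfx_surface.c and vmwgfx_fence.c hunks below switch to.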
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index 6fe7b92a82d1..37079859afc8 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -68,7 +68,7 @@
 
 struct ttm_object_file {
 	struct ttm_object_device *tdev;
-	rwlock_t lock;
+	spinlock_t lock;
 	struct list_head ref_list;
 	struct drm_open_hash ref_hash[TTM_REF_NUM];
 	struct kref refcount;
@@ -118,6 +118,7 @@ struct ttm_object_device {
  */
 
 struct ttm_ref_object {
+	struct rcu_head rcu_head;
 	struct drm_hash_item hash;
 	struct list_head head;
 	struct kref kref;
@@ -210,10 +211,9 @@ static void ttm_release_base(struct kref *kref)
 	 * call_rcu() or ttm_base_object_kfree().
 	 */
 
-	if (base->refcount_release) {
-		ttm_object_file_unref(&base->tfile);
+	ttm_object_file_unref(&base->tfile);
+	if (base->refcount_release)
 		base->refcount_release(&base);
-	}
 }
 
 void ttm_base_object_unref(struct ttm_base_object **p_base)
@@ -229,32 +229,46 @@ EXPORT_SYMBOL(ttm_base_object_unref);
 struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
 					       uint32_t key)
 {
-	struct ttm_object_device *tdev = tfile->tdev;
-	struct ttm_base_object *uninitialized_var(base);
+	struct ttm_base_object *base = NULL;
 	struct drm_hash_item *hash;
+	struct drm_open_hash *ht = &tfile->ref_hash[TTM_REF_USAGE];
 	int ret;
 
 	rcu_read_lock();
-	ret = drm_ht_find_item_rcu(&tdev->object_hash, key, &hash);
+	ret = drm_ht_find_item_rcu(ht, key, &hash);
 
 	if (likely(ret == 0)) {
-		base = drm_hash_entry(hash, struct ttm_base_object, hash);
-		ret = kref_get_unless_zero(&base->refcount) ? 0 : -EINVAL;
+		base = drm_hash_entry(hash, struct ttm_ref_object, hash)->obj;
+		if (!kref_get_unless_zero(&base->refcount))
+			base = NULL;
 	}
 	rcu_read_unlock();
 
-	if (unlikely(ret != 0))
-		return NULL;
+	return base;
+}
+EXPORT_SYMBOL(ttm_base_object_lookup);
 
-	if (tfile != base->tfile && !base->shareable) {
-		pr_err("Attempted access of non-shareable object\n");
-		ttm_base_object_unref(&base);
-		return NULL;
+struct ttm_base_object *
+ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key)
+{
+	struct ttm_base_object *base = NULL;
+	struct drm_hash_item *hash;
+	struct drm_open_hash *ht = &tdev->object_hash;
+	int ret;
+
+	rcu_read_lock();
+	ret = drm_ht_find_item_rcu(ht, key, &hash);
+
+	if (likely(ret == 0)) {
+		base = drm_hash_entry(hash, struct ttm_base_object, hash);
+		if (!kref_get_unless_zero(&base->refcount))
+			base = NULL;
 	}
+	rcu_read_unlock();
 
 	return base;
 }
-EXPORT_SYMBOL(ttm_base_object_lookup);
+EXPORT_SYMBOL(ttm_base_object_lookup_for_ref);
 
 int ttm_ref_object_add(struct ttm_object_file *tfile,
 		       struct ttm_base_object *base,
@@ -266,21 +280,25 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
 	struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
 	int ret = -EINVAL;
 
+	if (base->tfile != tfile && !base->shareable)
+		return -EPERM;
+
 	if (existed != NULL)
 		*existed = true;
 
 	while (ret == -EINVAL) {
-		read_lock(&tfile->lock);
-		ret = drm_ht_find_item(ht, base->hash.key, &hash);
+		rcu_read_lock();
+		ret = drm_ht_find_item_rcu(ht, base->hash.key, &hash);
 
 		if (ret == 0) {
 			ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
-			kref_get(&ref->kref);
-			read_unlock(&tfile->lock);
-			break;
+			if (kref_get_unless_zero(&ref->kref)) {
+				rcu_read_unlock();
+				break;
+			}
 		}
 
-		read_unlock(&tfile->lock);
+		rcu_read_unlock();
 		ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
 					   false, false);
 		if (unlikely(ret != 0))
@@ -297,19 +315,19 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
 		ref->ref_type = ref_type;
 		kref_init(&ref->kref);
 
-		write_lock(&tfile->lock);
-		ret = drm_ht_insert_item(ht, &ref->hash);
+		spin_lock(&tfile->lock);
+		ret = drm_ht_insert_item_rcu(ht, &ref->hash);
 
 		if (likely(ret == 0)) {
 			list_add_tail(&ref->head, &tfile->ref_list);
 			kref_get(&base->refcount);
-			write_unlock(&tfile->lock);
+			spin_unlock(&tfile->lock);
 			if (existed != NULL)
 				*existed = false;
 			break;
 		}
 
-		write_unlock(&tfile->lock);
+		spin_unlock(&tfile->lock);
 		BUG_ON(ret != -EINVAL);
 
 		ttm_mem_global_free(mem_glob, sizeof(*ref));
@@ -330,17 +348,17 @@ static void ttm_ref_object_release(struct kref *kref)
 	struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
 
 	ht = &tfile->ref_hash[ref->ref_type];
-	(void)drm_ht_remove_item(ht, &ref->hash);
+	(void)drm_ht_remove_item_rcu(ht, &ref->hash);
 	list_del(&ref->head);
-	write_unlock(&tfile->lock);
+	spin_unlock(&tfile->lock);
 
 	if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
 		base->ref_obj_release(base, ref->ref_type);
 
 	ttm_base_object_unref(&ref->obj);
 	ttm_mem_global_free(mem_glob, sizeof(*ref));
-	kfree(ref);
-	write_lock(&tfile->lock);
+	kfree_rcu(ref, rcu_head);
+	spin_lock(&tfile->lock);
 }
 
 int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
@@ -351,15 +369,15 @@ int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
 	struct drm_hash_item *hash;
 	int ret;
 
-	write_lock(&tfile->lock);
+	spin_lock(&tfile->lock);
 	ret = drm_ht_find_item(ht, key, &hash);
 	if (unlikely(ret != 0)) {
-		write_unlock(&tfile->lock);
+		spin_unlock(&tfile->lock);
 		return -EINVAL;
 	}
 	ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
 	kref_put(&ref->kref, ttm_ref_object_release);
-	write_unlock(&tfile->lock);
+	spin_unlock(&tfile->lock);
 	return 0;
 }
 EXPORT_SYMBOL(ttm_ref_object_base_unref);
@@ -372,7 +390,7 @@ void ttm_object_file_release(struct ttm_object_file **p_tfile)
 	struct ttm_object_file *tfile = *p_tfile;
 
 	*p_tfile = NULL;
-	write_lock(&tfile->lock);
+	spin_lock(&tfile->lock);
 
 	/*
 	 * Since we release the lock within the loop, we have to
@@ -388,7 +406,7 @@ void ttm_object_file_release(struct ttm_object_file **p_tfile)
 	for (i = 0; i < TTM_REF_NUM; ++i)
 		drm_ht_remove(&tfile->ref_hash[i]);
 
-	write_unlock(&tfile->lock);
+	spin_unlock(&tfile->lock);
 	ttm_object_file_unref(&tfile);
 }
 EXPORT_SYMBOL(ttm_object_file_release);
@@ -404,7 +422,7 @@ struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
 	if (unlikely(tfile == NULL))
 		return NULL;
 
-	rwlock_init(&tfile->lock);
+	spin_lock_init(&tfile->lock);
 	tfile->tdev = tdev;
 	kref_init(&tfile->refcount);
 	INIT_LIST_HEAD(&tfile->ref_list);
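
Besides the lookup split, the ttm_object.c hunks above change the per-file lock to a spinlock held only by writers, do lookups entirely under rcu_read_lock() with kref_get_unless_zero(), and add an rcu_head to struct ttm_ref_object so releases can use kfree_rcu(). A self-contained sketch of that general pattern, using made-up demo_* names rather than the TTM structures and a plain RCU list instead of drm_open_hash, is:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_obj {
	struct list_head head;
	struct rcu_head rcu_head;
	struct kref kref;
	u32 key;
};

static LIST_HEAD(demo_list);
static DEFINE_SPINLOCK(demo_lock);	/* taken by writers only */

/* Lock-free lookup: a reference is taken only if the object is still alive. */
static struct demo_obj *demo_lookup(u32 key)
{
	struct demo_obj *obj, *found = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(obj, &demo_list, head) {
		if (obj->key == key) {
			if (kref_get_unless_zero(&obj->kref))
				found = obj;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}

/* kref release callback: unlink under the spinlock, free after a grace period. */
/* Callers drop their reference with kref_put(&obj->kref, demo_release). */
static void demo_release(struct kref *kref)
{
	struct demo_obj *obj = container_of(kref, struct demo_obj, kref);

	spin_lock(&demo_lock);
	list_del_rcu(&obj->head);
	spin_unlock(&demo_lock);
	kfree_rcu(obj, rcu_head);
}

The deferred kfree_rcu() is what makes kref_get_unless_zero() in the reader safe: a lookup racing with the final kref_put() may still touch the object's memory, but it sees a zero refcount and backs off instead of resurrecting the object.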
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 0489c6152482..2d61a2d86bd7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -517,7 +517,7 @@ static struct ttm_backend_func vmw_ttm_func = {
 	.destroy = vmw_ttm_destroy,
 };
 
-struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
+static struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
 				 unsigned long size, uint32_t page_flags,
 				 struct page *dummy_read_page)
 {
@@ -546,12 +546,12 @@ out_no_init:
 	return NULL;
 }
 
-int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
+static int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
 {
 	return 0;
 }
 
-int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
+static int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 		      struct ttm_mem_type_manager *man)
 {
 	switch (type) {
@@ -589,7 +589,7 @@ int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 	return 0;
 }
 
-void vmw_evict_flags(struct ttm_buffer_object *bo,
+static void vmw_evict_flags(struct ttm_buffer_object *bo,
 		     struct ttm_placement *placement)
 {
 	*placement = vmw_sys_placement;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index c62d20e8a6f1..436b013b4231 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -271,7 +271,7 @@ void vmw_fence_obj_unreference(struct vmw_fence_obj **fence_p)
 	spin_unlock_irq(&fman->lock);
 }
 
-void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
+static void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
 				struct list_head *list)
 {
 	struct vmw_fence_action *action, *next_action;
@@ -897,7 +897,7 @@ static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
  * Note that the action callbacks may be executed before this function
  * returns.
  */
-void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
+static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
 			      struct vmw_fence_action *action)
 {
 	struct vmw_fence_manager *fman = fence->fman;
@@ -993,7 +993,7 @@ struct vmw_event_fence_pending {
 	struct drm_vmw_event_fence event;
 };
 
-int vmw_event_fence_action_create(struct drm_file *file_priv,
+static int vmw_event_fence_action_create(struct drm_file *file_priv,
 				  struct vmw_fence_obj *fence,
 				  uint32_t flags,
 				  uint64_t user_data,
@@ -1080,7 +1080,8 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
 	 */
 	if (arg->handle) {
 		struct ttm_base_object *base =
-		    ttm_base_object_lookup(vmw_fp->tfile, arg->handle);
+		    ttm_base_object_lookup_for_ref(dev_priv->tdev,
+						   arg->handle);
 
 		if (unlikely(base == NULL)) {
 			DRM_ERROR("Fence event invalid fence object handle "
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 9f307e0f3603..019e2dbb46c8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -40,7 +40,7 @@ struct vmw_clip_rect {
  * Clip @num_rects number of @rects against @clip storing the
  * results in @out_rects and the number of passed rects in @out_num.
  */
-void vmw_clip_cliprects(struct drm_clip_rect *rects,
+static void vmw_clip_cliprects(struct drm_clip_rect *rects,
 			int num_rects,
 			struct vmw_clip_rect clip,
 			SVGASignedRect *out_rects,
@@ -423,7 +423,7 @@ struct vmw_framebuffer_surface {
 	struct drm_master *master;
 };
 
-void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
+static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
 {
 	struct vmw_framebuffer_surface *vfbs =
 		vmw_framebuffer_to_vfbs(framebuffer);
@@ -589,7 +589,7 @@ out_free_tmp:
 	return ret;
 }
 
-int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
+static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
 				  struct drm_file *file_priv,
 				  unsigned flags, unsigned color,
 				  struct drm_clip_rect *clips,
@@ -761,7 +761,7 @@ struct vmw_framebuffer_dmabuf {
 	struct vmw_dma_buffer *buffer;
 };
 
-void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
+static void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
 {
 	struct vmw_framebuffer_dmabuf *vfbd =
 		vmw_framebuffer_to_vfbd(framebuffer);
@@ -947,7 +947,7 @@ static int do_dmabuf_dirty_sou(struct drm_file *file_priv,
 	return ret;
 }
 
-int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
+static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
 				 struct drm_file *file_priv,
 				 unsigned flags, unsigned color,
 				 struct drm_clip_rect *clips,
@@ -1677,7 +1677,7 @@ void vmw_disable_vblank(struct drm_device *dev, int crtc)
  * Small shared kms functions.
  */
 
-int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
+static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
 			 struct drm_vmw_rect *rects)
 {
 	struct drm_device *dev = dev_priv->dev;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 7de2ea8bd553..0fc93398bba2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -843,6 +843,7 @@ out_unlock:
 int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
 				struct drm_file *file_priv)
 {
+	struct vmw_private *dev_priv = vmw_priv(dev);
 	union drm_vmw_surface_reference_arg *arg =
 		(union drm_vmw_surface_reference_arg *)data;
 	struct drm_vmw_surface_arg *req = &arg->req;
@@ -854,7 +855,7 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
 	struct ttm_base_object *base;
 	int ret = -EINVAL;
 
-	base = ttm_base_object_lookup(tfile, req->sid);
+	base = ttm_base_object_lookup_for_ref(dev_priv->tdev, req->sid);
 	if (unlikely(base == NULL)) {
 		DRM_ERROR("Could not find surface to reference.\n");
 		return -EINVAL;
diff --git a/include/drm/ttm/ttm_object.h b/include/drm/ttm/ttm_object.h
index 58b029894eb3..0097cc03034e 100644
--- a/include/drm/ttm/ttm_object.h
+++ b/include/drm/ttm/ttm_object.h
@@ -190,14 +190,26 @@ extern int ttm_base_object_init(struct ttm_object_file *tfile,
  * @key: Hash key
  *
  * Looks up a struct ttm_base_object with the key @key.
- * Also verifies that the object is visible to the application, by
- * comparing the @tfile argument and checking the object shareable flag.
  */
 
 extern struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file
 						       *tfile, uint32_t key);
 
 /**
+ * ttm_base_object_lookup_for_ref
+ *
+ * @tdev: Pointer to a struct ttm_object_device.
+ * @key: Hash key
+ *
+ * Looks up a struct ttm_base_object with the key @key.
+ * This function should only be used when the struct tfile associated with the
+ * caller doesn't yet have a reference to the base object.
+ */
+
+extern struct ttm_base_object *
+ttm_base_object_lookup_for_ref(struct ttm_object_device *tdev, uint32_t key);
+
+/**
  * ttm_base_object_unref
 *
 * @p_base: Pointer to a pointer referencing a struct ttm_base_object.
@@ -218,6 +230,8 @@ extern void ttm_base_object_unref(struct ttm_base_object **p_base);
 * @existed: Upon completion, indicates that an identical reference object
 * already existed, and the refcount was upped on that object instead.
 *
+ * Checks that the base object is shareable and adds a ref object to it.
+ *
 * Adding a ref object to a base object is basically like referencing the
 * base object, but a user-space application holds the reference. When the
 * file corresponding to @tfile is closed, all its reference objects are