about summary refs log tree commit diff stats
path: root/drivers/gpu/drm/drm_gem.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/drm/drm_gem.c')
-rw-r--r-- drivers/gpu/drm/drm_gem.c | 17
1 files changed, 7 insertions, 10 deletions
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index c55f338e380b..55d6182555c7 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -282,15 +282,6 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
282{ 282{
283 struct drm_gem_object *obj; 283 struct drm_gem_object *obj;
284 284
285 /* This is gross. The idr system doesn't let us try a delete and
286 * return an error code. It just spews if you fail at deleting.
287 * So, we have to grab a lock around finding the object and then
288 * doing the delete on it and dropping the refcount, or the user
289 * could race us to double-decrement the refcount and cause a
290 * use-after-free later. Given the frequency of our handle lookups,
291 * we may want to use ida for number allocation and a hash table
292 * for the pointers, anyway.
293 */
294 spin_lock(&filp->table_lock); 285 spin_lock(&filp->table_lock);
295 286
296 /* Check if we currently have a reference on the object */ 287 /* Check if we currently have a reference on the object */
@@ -334,6 +325,12 @@ int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
334 if (!obj) 325 if (!obj)
335 return -ENOENT; 326 return -ENOENT;
336 327
328 /* Don't allow imported objects to be mapped */
329 if (obj->import_attach) {
330 ret = -EINVAL;
331 goto out;
332 }
333
337 ret = drm_gem_create_mmap_offset(obj); 334 ret = drm_gem_create_mmap_offset(obj);
338 if (ret) 335 if (ret)
339 goto out; 336 goto out;
@@ -537,7 +534,7 @@ EXPORT_SYMBOL(drm_gem_create_mmap_offset);
537 * Note that you are not allowed to change gfp-zones during runtime. That is, 534 * Note that you are not allowed to change gfp-zones during runtime. That is,
538 * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as 535 * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
539 * set during initialization. If you have special zone constraints, set them 536 * set during initialization. If you have special zone constraints, set them
540 * after drm_gem_init_object() via mapping_set_gfp_mask(). shmem-core takes care 537 * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
541 * to keep pages in the required zone during swap-in. 538 * to keep pages in the required zone during swap-in.
542 */ 539 */
543struct page **drm_gem_get_pages(struct drm_gem_object *obj) 540struct page **drm_gem_get_pages(struct drm_gem_object *obj)