Diffstat (limited to 'drivers/gpu/drm/drm_gem.c')
-rw-r--r--    drivers/gpu/drm/drm_gem.c    440
1 file changed, 297 insertions(+), 143 deletions(-)
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 603f256152ef..49293bdc972a 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -37,6 +37,7 @@
 #include <linux/shmem_fs.h>
 #include <linux/dma-buf.h>
 #include <drm/drmP.h>
+#include <drm/drm_vma_manager.h>
 
 /** @file drm_gem.c
  *
@@ -92,7 +93,7 @@ drm_gem_init(struct drm_device *dev)
 {
         struct drm_gem_mm *mm;
 
-        spin_lock_init(&dev->object_name_lock);
+        mutex_init(&dev->object_name_lock);
         idr_init(&dev->object_name_idr);
 
         mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
@@ -102,14 +103,9 @@ drm_gem_init(struct drm_device *dev)
         }
 
         dev->mm_private = mm;
-
-        if (drm_ht_create(&mm->offset_hash, 12)) {
-                kfree(mm);
-                return -ENOMEM;
-        }
-
-        drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
-                    DRM_FILE_PAGE_OFFSET_SIZE);
+        drm_vma_offset_manager_init(&mm->vma_manager,
+                                    DRM_FILE_PAGE_OFFSET_START,
+                                    DRM_FILE_PAGE_OFFSET_SIZE);
 
         return 0;
 }
@@ -119,8 +115,7 @@ drm_gem_destroy(struct drm_device *dev)
 {
         struct drm_gem_mm *mm = dev->mm_private;
 
-        drm_mm_takedown(&mm->offset_manager);
-        drm_ht_remove(&mm->offset_hash);
+        drm_vma_offset_manager_destroy(&mm->vma_manager);
         kfree(mm);
         dev->mm_private = NULL;
 }
@@ -132,16 +127,14 @@ drm_gem_destroy(struct drm_device *dev)
 int drm_gem_object_init(struct drm_device *dev,
                         struct drm_gem_object *obj, size_t size)
 {
-        BUG_ON((size & (PAGE_SIZE - 1)) != 0);
+        struct file *filp;
 
-        obj->dev = dev;
-        obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
-        if (IS_ERR(obj->filp))
-                return PTR_ERR(obj->filp);
+        filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
+        if (IS_ERR(filp))
+                return PTR_ERR(filp);
 
-        kref_init(&obj->refcount);
-        atomic_set(&obj->handle_count, 0);
-        obj->size = size;
+        drm_gem_private_object_init(dev, obj, size);
+        obj->filp = filp;
 
         return 0;
 }
@@ -152,8 +145,8 @@ EXPORT_SYMBOL(drm_gem_object_init);
  * no GEM provided backing store. Instead the caller is responsible for
  * backing the object and handling it.
  */
-int drm_gem_private_object_init(struct drm_device *dev,
+void drm_gem_private_object_init(struct drm_device *dev,
                         struct drm_gem_object *obj, size_t size)
 {
         BUG_ON((size & (PAGE_SIZE - 1)) != 0);
 
@@ -161,10 +154,9 @@ int drm_gem_private_object_init(struct drm_device *dev,
         obj->filp = NULL;
 
         kref_init(&obj->refcount);
-        atomic_set(&obj->handle_count, 0);
+        obj->handle_count = 0;
         obj->size = size;
-
-        return 0;
+        drm_vma_node_reset(&obj->vma_node);
 }
 EXPORT_SYMBOL(drm_gem_private_object_init);
 
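The hunk above turns drm_gem_private_object_init() into the common setup tail (it can no longer fail, hence the void return) and adds drm_vma_node_reset() for the new mmap-offset bookkeeping. A typical caller is a prime-import path, where the backing storage comes from a dma-buf rather than shmem. The sketch below is illustrative only; foo_gem_object and foo_gem_import are hypothetical driver names, not part of this patch:

struct foo_gem_object {
        struct drm_gem_object base;
        struct sg_table *sgt;           /* pages of the imported dma-buf */
};

static struct drm_gem_object *
foo_gem_import(struct drm_device *dev, size_t size, struct sg_table *sgt)
{
        struct foo_gem_object *obj;

        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
        if (!obj)
                return ERR_PTR(-ENOMEM);

        /* No shmem backing store: the pages come from the dma-buf. */
        drm_gem_private_object_init(dev, &obj->base, size);
        obj->sgt = sgt;

        return &obj->base;
}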
@@ -200,16 +192,79 @@ EXPORT_SYMBOL(drm_gem_object_alloc);
 static void
 drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
 {
-        if (obj->import_attach) {
-                drm_prime_remove_buf_handle(&filp->prime,
-                                obj->import_attach->dmabuf);
+        /*
+         * Note: obj->dma_buf can't disappear as long as we still hold a
+         * handle reference in obj->handle_count.
+         */
+        mutex_lock(&filp->prime.lock);
+        if (obj->dma_buf) {
+                drm_prime_remove_buf_handle_locked(&filp->prime,
+                                                   obj->dma_buf);
         }
-        if (obj->export_dma_buf) {
-                drm_prime_remove_buf_handle(&filp->prime,
-                                obj->export_dma_buf);
+        mutex_unlock(&filp->prime.lock);
+}
+
+static void drm_gem_object_ref_bug(struct kref *list_kref)
+{
+        BUG();
+}
+
+/**
+ * Called after the last handle to the object has been closed
+ *
+ * Removes any name for the object. Note that this must be
+ * called before drm_gem_object_free or we'll be touching
+ * freed memory
+ */
+static void drm_gem_object_handle_free(struct drm_gem_object *obj)
+{
+        struct drm_device *dev = obj->dev;
+
+        /* Remove any name for this object */
+        if (obj->name) {
+                idr_remove(&dev->object_name_idr, obj->name);
+                obj->name = 0;
+                /*
+                 * The object name held a reference to this object, drop
+                 * that now.
+                 *
+                 * This cannot be the last reference, since the handle holds one too.
+                 */
+                kref_put(&obj->refcount, drm_gem_object_ref_bug);
         }
 }
 
+static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
+{
+        /* Unbreak the reference cycle if we have an exported dma_buf. */
+        if (obj->dma_buf) {
+                dma_buf_put(obj->dma_buf);
+                obj->dma_buf = NULL;
+        }
+}
+
+static void
+drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
+{
+        if (WARN_ON(obj->handle_count == 0))
+                return;
+
+        /*
+         * Must bump handle count first as this may be the last
+         * ref, in which case the object would disappear before we
+         * checked for a name
+         */
+
+        mutex_lock(&obj->dev->object_name_lock);
+        if (--obj->handle_count == 0) {
+                drm_gem_object_handle_free(obj);
+                drm_gem_object_exported_dma_buf_free(obj);
+        }
+        mutex_unlock(&obj->dev->object_name_lock);
+
+        drm_gem_object_unreference_unlocked(obj);
+}
+
 /**
  * Removes the mapping from handle to filp for this object.
  */
@@ -242,7 +297,9 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
         idr_remove(&filp->object_idr, handle);
         spin_unlock(&filp->table_lock);
 
-        drm_gem_remove_prime_handles(obj, filp);
+        if (drm_core_check_feature(dev, DRIVER_PRIME))
+                drm_gem_remove_prime_handles(obj, filp);
+        drm_vma_node_revoke(&obj->vma_node, filp->filp);
 
         if (dev->driver->gem_close_object)
                 dev->driver->gem_close_object(obj, filp);
@@ -253,18 +310,36 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
 EXPORT_SYMBOL(drm_gem_handle_delete);
 
 /**
- * Create a handle for this object. This adds a handle reference
- * to the object, which includes a regular reference count. Callers
- * will likely want to dereference the object afterwards.
+ * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
+ *
+ * This implements the ->dumb_destroy kms driver callback for drivers which use
+ * gem to manage their backing storage.
+ */
+int drm_gem_dumb_destroy(struct drm_file *file,
+                         struct drm_device *dev,
+                         uint32_t handle)
+{
+        return drm_gem_handle_delete(file, handle);
+}
+EXPORT_SYMBOL(drm_gem_dumb_destroy);
+
+/**
+ * drm_gem_handle_create_tail - internal functions to create a handle
+ *
+ * This expects the dev->object_name_lock to be held already and will drop it
+ * before returning. Used to avoid races in establishing new handles when
+ * importing an object from either an flink name or a dma-buf.
  */
 int
-drm_gem_handle_create(struct drm_file *file_priv,
+drm_gem_handle_create_tail(struct drm_file *file_priv,
                        struct drm_gem_object *obj,
                        u32 *handlep)
 {
         struct drm_device *dev = obj->dev;
         int ret;
 
+        WARN_ON(!mutex_is_locked(&dev->object_name_lock));
+
         /*
          * Get the user-visible handle using idr. Preload and perform
          * allocation under our spinlock.
@@ -273,14 +348,22 @@ drm_gem_handle_create(struct drm_file *file_priv,
         spin_lock(&file_priv->table_lock);
 
         ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
-
+        drm_gem_object_reference(obj);
+        obj->handle_count++;
         spin_unlock(&file_priv->table_lock);
         idr_preload_end();
-        if (ret < 0)
+        mutex_unlock(&dev->object_name_lock);
+        if (ret < 0) {
+                drm_gem_object_handle_unreference_unlocked(obj);
                 return ret;
+        }
         *handlep = ret;
 
-        drm_gem_object_handle_reference(obj);
+        ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp);
+        if (ret) {
+                drm_gem_handle_delete(file_priv, *handlep);
+                return ret;
+        }
 
         if (dev->driver->gem_open_object) {
                 ret = dev->driver->gem_open_object(obj, file_priv);
@@ -292,6 +375,21 @@ drm_gem_handle_create(struct drm_file *file_priv,
 
         return 0;
 }
+
+/**
+ * Create a handle for this object. This adds a handle reference
+ * to the object, which includes a regular reference count. Callers
+ * will likely want to dereference the object afterwards.
+ */
+int
+drm_gem_handle_create(struct drm_file *file_priv,
+                      struct drm_gem_object *obj,
+                      u32 *handlep)
+{
+        mutex_lock(&obj->dev->object_name_lock);
+
+        return drm_gem_handle_create_tail(file_priv, obj, handlep);
+}
 EXPORT_SYMBOL(drm_gem_handle_create);
 
 
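As the kernel-doc re-added above says, drm_gem_handle_create() takes an extra handle reference on the object, so callers normally drop their own reference once the handle exists. A minimal sketch of that pattern in a hypothetical dumb_create implementation (foo_dumb_create is not part of this patch, and error handling is reduced to the essentials):

static int foo_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
                           struct drm_mode_create_dumb *args)
{
        struct drm_gem_object *obj;
        u32 handle;
        int ret;

        args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
        args->size = PAGE_ALIGN(args->pitch * args->height);

        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
        if (!obj)
                return -ENOMEM;

        /* Allocate the page-aligned, shmem-backed object. */
        ret = drm_gem_object_init(dev, obj, args->size);
        if (ret) {
                kfree(obj);
                return ret;
        }

        ret = drm_gem_handle_create(file_priv, obj, &handle);
        /* The handle now holds its own reference; drop ours. */
        drm_gem_object_unreference_unlocked(obj);
        if (ret)
                return ret;

        args->handle = handle;
        return 0;
}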
@@ -306,81 +404,155 @@ drm_gem_free_mmap_offset(struct drm_gem_object *obj)
 {
         struct drm_device *dev = obj->dev;
         struct drm_gem_mm *mm = dev->mm_private;
-        struct drm_map_list *list = &obj->map_list;
 
-        drm_ht_remove_item(&mm->offset_hash, &list->hash);
-        drm_mm_put_block(list->file_offset_node);
-        kfree(list->map);
-        list->map = NULL;
+        drm_vma_offset_remove(&mm->vma_manager, &obj->vma_node);
 }
 EXPORT_SYMBOL(drm_gem_free_mmap_offset);
 
 /**
- * drm_gem_create_mmap_offset - create a fake mmap offset for an object
+ * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
  * @obj: obj in question
+ * @size: the virtual size
  *
  * GEM memory mapping works by handing back to userspace a fake mmap offset
  * it can use in a subsequent mmap(2) call. The DRM core code then looks
  * up the object based on the offset and sets up the various memory mapping
  * structures.
  *
- * This routine allocates and attaches a fake offset for @obj.
+ * This routine allocates and attaches a fake offset for @obj, in cases where
+ * the virtual size differs from the physical size (ie. obj->size). Otherwise
+ * just use drm_gem_create_mmap_offset().
  */
 int
-drm_gem_create_mmap_offset(struct drm_gem_object *obj)
+drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
 {
         struct drm_device *dev = obj->dev;
         struct drm_gem_mm *mm = dev->mm_private;
-        struct drm_map_list *list;
-        struct drm_local_map *map;
-        int ret;
 
-        /* Set the object up for mmap'ing */
-        list = &obj->map_list;
-        list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
-        if (!list->map)
-                return -ENOMEM;
-
-        map = list->map;
-        map->type = _DRM_GEM;
-        map->size = obj->size;
-        map->handle = obj;
+        return drm_vma_offset_add(&mm->vma_manager, &obj->vma_node,
+                                  size / PAGE_SIZE);
+}
+EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);
 
-        /* Get a DRM GEM mmap offset allocated... */
-        list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
-                        obj->size / PAGE_SIZE, 0, false);
+/**
+ * drm_gem_create_mmap_offset - create a fake mmap offset for an object
+ * @obj: obj in question
+ *
+ * GEM memory mapping works by handing back to userspace a fake mmap offset
+ * it can use in a subsequent mmap(2) call. The DRM core code then looks
+ * up the object based on the offset and sets up the various memory mapping
+ * structures.
+ *
+ * This routine allocates and attaches a fake offset for @obj.
+ */
+int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
+{
+        return drm_gem_create_mmap_offset_size(obj, obj->size);
+}
+EXPORT_SYMBOL(drm_gem_create_mmap_offset);
 
-        if (!list->file_offset_node) {
-                DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
-                ret = -ENOSPC;
-                goto out_free_list;
+/**
+ * drm_gem_get_pages - helper to allocate backing pages for a GEM object
+ * from shmem
+ * @obj: obj in question
+ * @gfpmask: gfp mask of requested pages
+ */
+struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
+{
+        struct inode *inode;
+        struct address_space *mapping;
+        struct page *p, **pages;
+        int i, npages;
+
+        /* This is the shared memory object that backs the GEM resource */
+        inode = file_inode(obj->filp);
+        mapping = inode->i_mapping;
+
+        /* We already BUG_ON() for non-page-aligned sizes in
+         * drm_gem_object_init(), so we should never hit this unless
+         * driver author is doing something really wrong:
+         */
+        WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
+
+        npages = obj->size >> PAGE_SHIFT;
+
+        pages = drm_malloc_ab(npages, sizeof(struct page *));
+        if (pages == NULL)
+                return ERR_PTR(-ENOMEM);
+
+        gfpmask |= mapping_gfp_mask(mapping);
+
+        for (i = 0; i < npages; i++) {
+                p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
+                if (IS_ERR(p))
+                        goto fail;
+                pages[i] = p;
+
+                /* There is a hypothetical issue w/ drivers that require
+                 * buffer memory in the low 4GB.. if the pages are un-
+                 * pinned, and swapped out, they can end up swapped back
+                 * in above 4GB. If pages are already in memory, then
+                 * shmem_read_mapping_page_gfp will ignore the gfpmask,
+                 * even if the already in-memory page disobeys the mask.
+                 *
+                 * It is only a theoretical issue today, because none of
+                 * the devices with this limitation can be populated with
+                 * enough memory to trigger the issue. But this BUG_ON()
+                 * is here as a reminder in case the problem with
+                 * shmem_read_mapping_page_gfp() isn't solved by the time
+                 * it does become a real issue.
+                 *
+                 * See this thread: http://lkml.org/lkml/2011/7/11/238
+                 */
+                BUG_ON((gfpmask & __GFP_DMA32) &&
+                                (page_to_pfn(p) >= 0x00100000UL));
         }
 
-        list->file_offset_node = drm_mm_get_block(list->file_offset_node,
-                        obj->size / PAGE_SIZE, 0);
-        if (!list->file_offset_node) {
-                ret = -ENOMEM;
-                goto out_free_list;
-        }
+        return pages;
 
-        list->hash.key = list->file_offset_node->start;
-        ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
-        if (ret) {
-                DRM_ERROR("failed to add to map hash\n");
-                goto out_free_mm;
-        }
+fail:
+        while (i--)
+                page_cache_release(pages[i]);
 
-        return 0;
+        drm_free_large(pages);
+        return ERR_CAST(p);
+}
+EXPORT_SYMBOL(drm_gem_get_pages);
 
-out_free_mm:
-        drm_mm_put_block(list->file_offset_node);
-out_free_list:
-        kfree(list->map);
-        list->map = NULL;
+/**
+ * drm_gem_put_pages - helper to free backing pages for a GEM object
+ * @obj: obj in question
+ * @pages: pages to free
+ * @dirty: if true, pages will be marked as dirty
+ * @accessed: if true, the pages will be marked as accessed
+ */
+void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
+                bool dirty, bool accessed)
+{
+        int i, npages;
 
-        return ret;
+        /* We already BUG_ON() for non-page-aligned sizes in
+         * drm_gem_object_init(), so we should never hit this unless
+         * driver author is doing something really wrong:
+         */
+        WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
+
+        npages = obj->size >> PAGE_SHIFT;
+
+        for (i = 0; i < npages; i++) {
+                if (dirty)
+                        set_page_dirty(pages[i]);
+
+                if (accessed)
+                        mark_page_accessed(pages[i]);
+
+                /* Undo the reference we took when populating the table */
+                page_cache_release(pages[i]);
+        }
+
+        drm_free_large(pages);
 }
-EXPORT_SYMBOL(drm_gem_create_mmap_offset);
+EXPORT_SYMBOL(drm_gem_put_pages);
 
 /** Returns a reference to the object named by the handle. */
 struct drm_gem_object *
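drm_gem_create_mmap_offset() now just feeds the drm_vma_manager, and drm_gem_get_pages()/drm_gem_put_pages() give drivers a common shmem backing-store helper. A driver's dumb_map_offset callback typically allocates the fake offset and returns drm_vma_node_offset_addr(&obj->vma_node) to userspace. A hedged sketch under the locking rules of this kernel (foo_dumb_map_offset is hypothetical; drm_gem_object_lookup() still takes the device as its first argument here):

static int foo_dumb_map_offset(struct drm_file *file_priv, struct drm_device *dev,
                               uint32_t handle, uint64_t *offset)
{
        struct drm_gem_object *obj;
        int ret = 0;

        mutex_lock(&dev->struct_mutex);

        obj = drm_gem_object_lookup(dev, file_priv, handle);
        if (!obj) {
                ret = -ENOENT;
                goto unlock;
        }

        ret = drm_gem_create_mmap_offset(obj);
        if (ret)
                goto unref;

        /* Userspace passes this fake offset to mmap(2) on the DRM fd. */
        *offset = drm_vma_node_offset_addr(&obj->vma_node);

unref:
        drm_gem_object_unreference(obj);
unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}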
@@ -445,8 +617,14 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
         if (obj == NULL)
                 return -ENOENT;
 
+        mutex_lock(&dev->object_name_lock);
         idr_preload(GFP_KERNEL);
-        spin_lock(&dev->object_name_lock);
+        /* prevent races with concurrent gem_close. */
+        if (obj->handle_count == 0) {
+                ret = -ENOENT;
+                goto err;
+        }
+
         if (!obj->name) {
                 ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
                 if (ret < 0)
@@ -462,8 +640,8 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
         ret = 0;
 
 err:
-        spin_unlock(&dev->object_name_lock);
         idr_preload_end();
+        mutex_unlock(&dev->object_name_lock);
         drm_gem_object_unreference_unlocked(obj);
         return ret;
 }
@@ -486,15 +664,17 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data,
         if (!(dev->driver->driver_features & DRIVER_GEM))
                 return -ENODEV;
 
-        spin_lock(&dev->object_name_lock);
+        mutex_lock(&dev->object_name_lock);
         obj = idr_find(&dev->object_name_idr, (int) args->name);
-        if (obj)
+        if (obj) {
                 drm_gem_object_reference(obj);
-        spin_unlock(&dev->object_name_lock);
-        if (!obj)
+        } else {
+                mutex_unlock(&dev->object_name_lock);
                 return -ENOENT;
+        }
 
-        ret = drm_gem_handle_create(file_priv, obj, &handle);
+        /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
+        ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
         drm_gem_object_unreference_unlocked(obj);
         if (ret)
                 return ret;
@@ -527,7 +707,9 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
         struct drm_gem_object *obj = ptr;
         struct drm_device *dev = obj->dev;
 
-        drm_gem_remove_prime_handles(obj, file_priv);
+        if (drm_core_check_feature(dev, DRIVER_PRIME))
+                drm_gem_remove_prime_handles(obj, file_priv);
+        drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
 
         if (dev->driver->gem_close_object)
                 dev->driver->gem_close_object(obj, file_priv);
@@ -553,6 +735,8 @@ drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
 void
 drm_gem_object_release(struct drm_gem_object *obj)
 {
+        WARN_ON(obj->dma_buf);
+
         if (obj->filp)
                 fput(obj->filp);
 }
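drm_gem_object_release() now also warns if an exported dma_buf is still attached. On the driver side, the matching gem_free_object callback usually undoes the helpers used at creation time: return the shmem pages, drop the fake mmap offset, then release the core object. A sketch assuming a hypothetical foo_gem_object wrapper that caches the array returned by drm_gem_get_pages() in foo->pages:

static void foo_gem_free_object(struct drm_gem_object *obj)
{
        struct foo_gem_object *foo = container_of(obj, struct foo_gem_object, base);

        if (foo->pages)
                drm_gem_put_pages(obj, foo->pages, true, false);

        /* Safe even if no fake offset was ever allocated. */
        drm_gem_free_mmap_offset(obj);
        drm_gem_object_release(obj);
        kfree(foo);
}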
@@ -577,41 +761,6 @@ drm_gem_object_free(struct kref *kref)
 }
 EXPORT_SYMBOL(drm_gem_object_free);
 
-static void drm_gem_object_ref_bug(struct kref *list_kref)
-{
-        BUG();
-}
-
-/**
- * Called after the last handle to the object has been closed
- *
- * Removes any name for the object. Note that this must be
- * called before drm_gem_object_free or we'll be touching
- * freed memory
- */
-void drm_gem_object_handle_free(struct drm_gem_object *obj)
-{
-        struct drm_device *dev = obj->dev;
-
-        /* Remove any name for this object */
-        spin_lock(&dev->object_name_lock);
-        if (obj->name) {
-                idr_remove(&dev->object_name_idr, obj->name);
-                obj->name = 0;
-                spin_unlock(&dev->object_name_lock);
-                /*
-                 * The object name held a reference to this object, drop
-                 * that now.
-                 *
-                 * This cannot be the last reference, since the handle holds one too.
-                 */
-                kref_put(&obj->refcount, drm_gem_object_ref_bug);
-        } else
-                spin_unlock(&dev->object_name_lock);
-
-}
-EXPORT_SYMBOL(drm_gem_object_handle_free);
-
 void drm_gem_vm_open(struct vm_area_struct *vma)
 {
         struct drm_gem_object *obj = vma->vm_private_data;
@@ -653,6 +802,10 @@ EXPORT_SYMBOL(drm_gem_vm_close);
  * the GEM object is not looked up based on its fake offset. To implement the
  * DRM mmap operation, drivers should use the drm_gem_mmap() function.
  *
+ * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
+ * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
+ * callers must verify access restrictions before calling this helper.
+ *
  * NOTE: This function has to be protected with dev->struct_mutex
  *
  * Return 0 or success or -EINVAL if the object size is smaller than the VMA
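The new paragraph makes the split explicit: drm_gem_mmap() performs the access check through the vma manager, while drm_gem_mmap_obj() trusts its caller. A dma-buf mmap path, where the importer already holds a legitimate reference to the object, can therefore call drm_gem_mmap_obj() directly. foo_gem_prime_mmap below is a hypothetical example; struct_mutex must be held, as the NOTE above requires:

static int foo_gem_prime_mmap(struct drm_gem_object *obj,
                              struct vm_area_struct *vma)
{
        int ret;

        /* Access was granted when the buffer was exported/imported, so
         * the fake-offset lookup and access check of drm_gem_mmap() are
         * not needed here.
         */
        mutex_lock(&obj->dev->struct_mutex);
        ret = drm_gem_mmap_obj(obj, obj->size, vma);
        mutex_unlock(&obj->dev->struct_mutex);

        return ret;
}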
@@ -701,14 +854,17 @@ EXPORT_SYMBOL(drm_gem_mmap_obj);
  * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
  * contain the fake offset we created when the GTT map ioctl was called on
  * the object) and map it with a call to drm_gem_mmap_obj().
+ *
+ * If the caller is not granted access to the buffer object, the mmap will fail
+ * with EACCES. Please see the vma manager for more information.
  */
 int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 {
         struct drm_file *priv = filp->private_data;
         struct drm_device *dev = priv->minor->dev;
         struct drm_gem_mm *mm = dev->mm_private;
-        struct drm_local_map *map = NULL;
-        struct drm_hash_item *hash;
+        struct drm_gem_object *obj;
+        struct drm_vma_offset_node *node;
         int ret = 0;
 
         if (drm_device_is_unplugged(dev))
@@ -716,21 +872,19 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 
         mutex_lock(&dev->struct_mutex);
 
-        if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
+        node = drm_vma_offset_exact_lookup(&mm->vma_manager, vma->vm_pgoff,
+                                           vma_pages(vma));
+        if (!node) {
                 mutex_unlock(&dev->struct_mutex);
                 return drm_mmap(filp, vma);
+        } else if (!drm_vma_node_is_allowed(node, filp)) {
+                mutex_unlock(&dev->struct_mutex);
+                return -EACCES;
         }
 
-        map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
-        if (!map ||
-            ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
-                ret = -EPERM;
-                goto out_unlock;
-        }
-
-        ret = drm_gem_mmap_obj(map->handle, map->size, vma);
+        obj = container_of(node, struct drm_gem_object, vma_node);
+        ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT, vma);
 
-out_unlock:
         mutex_unlock(&dev->struct_mutex);
 
         return ret;
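For completeness, this is roughly how the pieces fit together in a driver: drm_gem_mmap() as the file mmap handler (it resolves the fake offset through the vma manager and enforces the new -EACCES check), drm_gem_vm_open()/drm_gem_vm_close() as vm_ops, and the drm_gem_dumb_destroy() helper added by this patch as the dumb_destroy callback. All foo_* names are hypothetical and the field list is trimmed to the GEM-related parts:

static const struct vm_operations_struct foo_gem_vm_ops = {
        .fault = foo_gem_fault,         /* hypothetical per-driver fault handler */
        .open  = drm_gem_vm_open,
        .close = drm_gem_vm_close,
};

static const struct file_operations foo_driver_fops = {
        .owner          = THIS_MODULE,
        .open           = drm_open,
        .release        = drm_release,
        .unlocked_ioctl = drm_ioctl,
        .mmap           = drm_gem_mmap, /* fake-offset lookup via the vma manager */
        .poll           = drm_poll,
        .read           = drm_read,
        .llseek         = noop_llseek,
};

static struct drm_driver foo_driver = {
        .driver_features = DRIVER_GEM | DRIVER_PRIME,
        .gem_free_object = foo_gem_free_object,
        .gem_vm_ops      = &foo_gem_vm_ops,
        .dumb_create     = foo_dumb_create,
        .dumb_map_offset = foo_dumb_map_offset,
        .dumb_destroy    = drm_gem_dumb_destroy,
        .fops            = &foo_driver_fops,
};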