| author    | Andrea Bastoni <bastoni@cs.unc.edu> | 2010-05-30 19:16:45 -0400 |
|-----------|-------------------------------------|---------------------------|
| committer | Andrea Bastoni <bastoni@cs.unc.edu> | 2010-05-30 19:16:45 -0400 |
| commit    | ada47b5fe13d89735805b566185f4885f5a3f750 (patch) | |
| tree      | 644b88f8a71896307d71438e9b3af49126ffb22b /drivers/gpu/drm/drm_gem.c | |
| parent    | 43e98717ad40a4ae64545b5ba047c7b86aa44f4f (diff) | |
| parent    | 3280f21d43ee541f97f8cda5792150d2dbec20d5 (diff) | |
Merge branch 'wip-2.6.34' into old-private-master (archived-private-master)
Diffstat (limited to 'drivers/gpu/drm/drm_gem.c')

| -rw-r--r-- | drivers/gpu/drm/drm_gem.c | 83 |

1 file changed, 49 insertions, 34 deletions
```diff
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index e9dbb481c469..aa89d4b0b4c4 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -142,19 +142,6 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
 	if (IS_ERR(obj->filp))
 		goto free;
 
-	/* Basically we want to disable the OOM killer and handle ENOMEM
-	 * ourselves by sacrificing pages from cached buffers.
-	 * XXX shmem_file_[gs]et_gfp_mask()
-	 */
-	mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping,
-			     GFP_HIGHUSER |
-			     __GFP_COLD |
-			     __GFP_FS |
-			     __GFP_RECLAIMABLE |
-			     __GFP_NORETRY |
-			     __GFP_NOWARN |
-			     __GFP_NOMEMALLOC);
-
 	kref_init(&obj->refcount);
 	kref_init(&obj->handlecount);
 	obj->size = size;
```
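The hunk above drops the GFP-mask tweak from the common allocation path. For illustration only: a driver that still wanted the relaxed allocation behaviour could presumably reapply the mask to the object's shmem mapping itself after calling drm_gem_object_alloc(). The foo_gem_alloc() wrapper below is hypothetical and not part of this patch.

```c
#include <linux/pagemap.h>
#include "drmP.h"

/* Hypothetical driver-side wrapper (not part of this patch): reapply a
 * relaxed GFP mask to the shmem mapping backing the object, reusing some
 * of the flags the removed core code used. */
static struct drm_gem_object *foo_gem_alloc(struct drm_device *dev, size_t size)
{
	struct drm_gem_object *obj;

	obj = drm_gem_object_alloc(dev, size);
	if (obj == NULL)
		return NULL;

	/* obj->filp is the shmem file set up by drm_gem_object_alloc() */
	mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping,
			     GFP_HIGHUSER | __GFP_RECLAIMABLE |
			     __GFP_NORETRY | __GFP_NOWARN);

	return obj;
}
```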
```diff
@@ -205,9 +192,7 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
 	idr_remove(&filp->object_idr, handle);
 	spin_unlock(&filp->table_lock);
 
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_handle_unreference(obj);
-	mutex_unlock(&dev->struct_mutex);
+	drm_gem_object_handle_unreference_unlocked(obj);
 
 	return 0;
 }
```
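drm_gem_object_handle_unreference_unlocked() and its plain-unreference sibling are not defined in this file; they are presumably small inline helpers in drmP.h built on kref_put() and the unlocked free path added further down. A rough sketch of what they are expected to look like, reconstructed from this diff rather than copied from the header:

```c
/* Sketch only; reconstructed from the free functions added below. */
static inline void
drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
{
	if (obj != NULL)
		kref_put(&obj->refcount, drm_gem_object_free_unlocked);
}

static inline void
drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
{
	if (obj == NULL)
		return;

	/* Drop the handle count first; the handle's object reference is
	 * then dropped via the unlocked path. */
	kref_put(&obj->handlecount, drm_gem_object_handle_free);
	drm_gem_object_unreference_unlocked(obj);
}
```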
```diff
@@ -338,9 +323,7 @@ again:
 	}
 
 err:
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_unreference(obj);
-	mutex_unlock(&dev->struct_mutex);
+	drm_gem_object_unreference_unlocked(obj);
 	return ret;
 }
 
@@ -371,9 +354,7 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data,
 		return -ENOENT;
 
 	ret = drm_gem_handle_create(file_priv, obj, &handle);
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_unreference(obj);
-	mutex_unlock(&dev->struct_mutex);
+	drm_gem_object_unreference_unlocked(obj);
 	if (ret)
 		return ret;
 
```
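The two hunks above follow the same pattern: the lock/unreference/unlock triplet becomes a single unlocked call, so reference drops no longer serialize on dev->struct_mutex. An illustrative driver-side ioctl using the same idiom (struct foo_args and foo_ioctl() are made up, not part of this patch):

```c
/* Illustrative only: typical lookup/use/unreference pattern after this
 * change. */
struct foo_args {
	u32 handle;
};

static int foo_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct foo_args *args = data;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	/* ... operate on obj ... */

	/* No need to take dev->struct_mutex just to drop the reference. */
	drm_gem_object_unreference_unlocked(obj);
	return 0;
}
```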
```diff
@@ -403,7 +384,7 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
 {
 	struct drm_gem_object *obj = ptr;
 
-	drm_gem_object_handle_unreference(obj);
+	drm_gem_object_handle_unreference_unlocked(obj);
 
 	return 0;
 }
@@ -416,16 +397,25 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
 void
 drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
 {
-	mutex_lock(&dev->struct_mutex);
 	idr_for_each(&file_private->object_idr,
 		     &drm_gem_object_release_handle, NULL);
 
 	idr_destroy(&file_private->object_idr);
-	mutex_unlock(&dev->struct_mutex);
+}
+
+static void
+drm_gem_object_free_common(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	fput(obj->filp);
+	atomic_dec(&dev->object_count);
+	atomic_sub(obj->size, &dev->object_memory);
+	kfree(obj);
 }
 
 /**
  * Called after the last reference to the object has been lost.
+ * Must be called holding struct_ mutex
  *
  * Frees the object
  */
@@ -440,14 +430,40 @@ drm_gem_object_free(struct kref *kref)
 	if (dev->driver->gem_free_object != NULL)
 		dev->driver->gem_free_object(obj);
 
-	fput(obj->filp);
-	atomic_dec(&dev->object_count);
-	atomic_sub(obj->size, &dev->object_memory);
-	kfree(obj);
+	drm_gem_object_free_common(obj);
 }
 EXPORT_SYMBOL(drm_gem_object_free);
 
 /**
+ * Called after the last reference to the object has been lost.
+ * Must be called without holding struct_mutex
+ *
+ * Frees the object
+ */
+void
+drm_gem_object_free_unlocked(struct kref *kref)
+{
+	struct drm_gem_object *obj = (struct drm_gem_object *) kref;
+	struct drm_device *dev = obj->dev;
+
+	if (dev->driver->gem_free_object_unlocked != NULL)
+		dev->driver->gem_free_object_unlocked(obj);
+	else if (dev->driver->gem_free_object != NULL) {
+		mutex_lock(&dev->struct_mutex);
+		dev->driver->gem_free_object(obj);
+		mutex_unlock(&dev->struct_mutex);
+	}
+
+	drm_gem_object_free_common(obj);
+}
+EXPORT_SYMBOL(drm_gem_object_free_unlocked);
+
+static void drm_gem_object_ref_bug(struct kref *list_kref)
+{
+	BUG();
+}
+
+/**
  * Called after the last handle to the object has been closed
  *
  * Removes any name for the object. Note that this must be
```
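The new drm_gem_object_free_unlocked() gives drivers two options: provide a gem_free_object_unlocked() hook that needs no locking, or keep the existing gem_free_object() hook and let the core wrap it in struct_mutex. A hypothetical driver (the foo_* names are illustrative, not from this patch) might wire it up like this:

```c
/* Hypothetical driver hook-up; only one of the two callbacks is needed. */
static void foo_gem_free_object(struct drm_gem_object *obj)
{
	/* Free driver-private state; the common GEM bookkeeping is done
	 * afterwards by drm_gem_object_free_common() in the core. */
	kfree(obj->driver_private);
}

static struct drm_driver foo_driver = {
	.driver_features = DRIVER_GEM,
	/* Locked variant: the core takes dev->struct_mutex around it. */
	.gem_free_object = foo_gem_free_object,
	/* Or, if the free path needs no locking:
	 * .gem_free_object_unlocked = foo_gem_free_object, */
};
```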
```diff
@@ -471,8 +487,10 @@ drm_gem_object_handle_free(struct kref *kref)
 		/*
 		 * The object name held a reference to this object, drop
 		 * that now.
+		 *
+		 * This cannot be the last reference, since the handle holds one too.
 		 */
-		drm_gem_object_unreference(obj);
+		kref_put(&obj->refcount, drm_gem_object_ref_bug);
 	} else
 		spin_unlock(&dev->object_name_lock);
 
```
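The kref_put() in this hunk is effectively an assertion: the release callback drm_gem_object_ref_bug() only runs if this really was the last reference, which the new comment argues cannot happen because the handle still holds one. The idiom in isolation (illustrative names, not code from this file):

```c
/* Minimal illustration: decrement a kref that is known not to be held
 * last by us, and BUG() loudly if that assumption is ever violated. */
static void must_not_be_last_ref(struct kref *kref)
{
	BUG();
}

static void drop_non_final_ref(struct kref *ref)
{
	kref_put(ref, must_not_be_last_ref);
}
```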
```diff
@@ -490,11 +508,8 @@ EXPORT_SYMBOL(drm_gem_vm_open);
 void drm_gem_vm_close(struct vm_area_struct *vma)
 {
 	struct drm_gem_object *obj = vma->vm_private_data;
-	struct drm_device *dev = obj->dev;
 
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_unreference(obj);
-	mutex_unlock(&dev->struct_mutex);
+	drm_gem_object_unreference_unlocked(obj);
 }
 EXPORT_SYMBOL(drm_gem_vm_close);
 
```
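For context, drm_gem_vm_close() pairs with drm_gem_vm_open() (not touched by this patch), which takes the reference dropped here so that VMAs duplicated by fork() or split keep the object alive. Assuming it simply mirrors the close path, it looks roughly like:

```c
/* Sketch of the counterpart, assuming it just takes a reference; see the
 * actual drm_gem_vm_open() in this file for the authoritative version. */
void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_reference(obj);
}
```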