author     Chris Wilson <chris@chris-wilson.co.uk>    2009-09-14 11:50:28 -0400
committer  Jesse Barnes <jbarnes@virtuousgeek.org>    2009-09-17 17:43:31 -0400
commit     31169714fc928aed4e945b959dca2bedd259b9c9 (patch)
tree       3cd6aa12bd7da1e4202607597b3baf9a07f48290 /drivers/gpu
parent     725ceaa08a98fcdb1ec1c302700e33b629aece4b (diff)
drm/i915: Register a shrinker to free inactive lists under memory pressure
This should help GEM handle memory pressure situations more gracefully.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
Diffstat (limited to 'drivers/gpu')
-rw-r--r--   drivers/gpu/drm/i915/i915_drv.c |   3
-rw-r--r--   drivers/gpu/drm/i915/i915_drv.h |  12
-rw-r--r--   drivers/gpu/drm/i915/i915_gem.c | 144
3 files changed, 159 insertions, 0 deletions
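For context, the patch hooks GEM into the kernel's generic shrinker interface of that era (the pre-2.6.35 single-callback struct shrinker, visible in the diff below): the VM calls the .shrink callback with nr_to_scan == 0 to ask how many objects could be freed, and with a positive count when it wants some of them released. The stand-alone sketch below is only illustrative of that contract, not part of the patch; the module and every example_* symbol are hypothetical stand-ins for GEM's per-device inactive lists, and the return-value scaling by sysctl_vfs_cache_pressure mirrors what the patch itself does.

/*
 * Illustrative only: a minimal module using the same shrinker contract as
 * this patch (pre-2.6.35 API: int (*shrink)(int nr_to_scan, gfp_t gfp_mask)).
 * Everything named example_* is hypothetical.
 */
#include <linux/dcache.h>	/* sysctl_vfs_cache_pressure */
#include <linux/kernel.h>
#include <linux/mm.h>		/* struct shrinker, register_shrinker() */
#include <linux/module.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);
static unsigned long example_nr_cached = 128;	/* pretend pool of freeable objects */

static int example_shrink(int nr_to_scan, gfp_t gfp_mask)
{
	if (nr_to_scan > 0) {
		/* Reclaim pass: drop up to nr_to_scan objects.  Returning -1
		 * tells the VM no progress could be made without risking
		 * deadlock, which is what i915_gem_shrink() does when it
		 * cannot take any device's struct_mutex. */
		if (!spin_trylock(&example_lock))
			return -1;
		example_nr_cached -= min_t(unsigned long, nr_to_scan,
					   example_nr_cached);
		spin_unlock(&example_lock);
	}

	/* Query pass (nr_to_scan == 0) and post-reclaim result: report how
	 * many objects remain freeable, scaled by vfs_cache_pressure just as
	 * the patch does. */
	return (example_nr_cached / 100) * sysctl_vfs_cache_pressure;
}

static struct shrinker example_shrinker = {
	.shrink = example_shrink,
	.seeks	= DEFAULT_SEEKS,	/* relative cost of recreating an object */
};

static int __init example_init(void)
{
	register_shrinker(&example_shrinker);
	return 0;
}

static void __exit example_exit(void)
{
	unregister_shrinker(&example_shrinker);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");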
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 1f9e4503b072..c57c1744cecf 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -362,6 +362,8 @@ static int __init i915_init(void)
 {
 	driver.num_ioctls = i915_max_ioctl;
 
+	i915_gem_shrinker_init();
+
 	/*
 	 * If CONFIG_DRM_I915_KMS is set, default to KMS unless
 	 * explicitly disabled with the module pararmeter.
@@ -388,6 +390,7 @@ static int __init i915_init(void)
 
 static void __exit i915_exit(void)
 {
+	i915_gem_shrinker_exit();
 	drm_exit(&driver);
 }
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 07214694b14f..bbcf5fc72666 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -369,6 +369,15 @@ typedef struct drm_i915_private {
 	int gtt_mtrr;
 
 	/**
+	 * Membership on list of all loaded devices, used to evict
+	 * inactive buffers under memory pressure.
+	 *
+	 * Modifications should only be done whilst holding the
+	 * shrink_list_lock spinlock.
+	 */
+	struct list_head shrink_list;
+
+	/**
 	 * List of objects currently involved in rendering from the
 	 * ringbuffer.
 	 *
@@ -741,6 +750,9 @@ int i915_gem_object_get_pages(struct drm_gem_object *obj);
 void i915_gem_object_put_pages(struct drm_gem_object *obj);
 void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
 
+void i915_gem_shrinker_init(void);
+void i915_gem_shrinker_exit(void);
+
 /* i915_gem_tiling.c */
 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
 void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 77cc6f57166c..2fff2e0a976e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -53,6 +53,9 @@ static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *o
 				struct drm_i915_gem_pwrite *args,
 				struct drm_file *file_priv);
 
+static LIST_HEAD(shrink_list);
+static DEFINE_SPINLOCK(shrink_list_lock);
+
 int i915_gem_do_init(struct drm_device *dev, unsigned long start,
 		     unsigned long end)
 {
@@ -4265,6 +4268,10 @@ i915_gem_load(struct drm_device *dev)
 			  i915_gem_retire_work_handler);
 	dev_priv->mm.next_gem_seqno = 1;
 
+	spin_lock(&shrink_list_lock);
+	list_add(&dev_priv->mm.shrink_list, &shrink_list);
+	spin_unlock(&shrink_list_lock);
+
 	/* Old X drivers will take 0-2 for front, back, depth buffers */
 	dev_priv->fence_reg_start = 3;
 
@@ -4482,3 +4489,140 @@ void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv)
 	list_del_init(i915_file_priv->mm.request_list.next);
 	mutex_unlock(&dev->struct_mutex);
 }
+
+/* Immediately discard the backing storage */
+static void
+i915_gem_object_truncate(struct drm_gem_object *obj)
+{
+	struct inode *inode;
+
+	inode = obj->filp->f_path.dentry->d_inode;
+
+	mutex_lock(&inode->i_mutex);
+	truncate_inode_pages(inode->i_mapping, 0);
+	mutex_unlock(&inode->i_mutex);
+}
+
+static inline int
+i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
+{
+	return !obj_priv->dirty;
+}
+
+static int
+i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
+{
+	drm_i915_private_t *dev_priv, *next_dev;
+	struct drm_i915_gem_object *obj_priv, *next_obj;
+	int cnt = 0;
+	int would_deadlock = 1;
+
+	/* "fast-path" to count number of available objects */
+	if (nr_to_scan == 0) {
+		spin_lock(&shrink_list_lock);
+		list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
+			struct drm_device *dev = dev_priv->dev;
+
+			if (mutex_trylock(&dev->struct_mutex)) {
+				list_for_each_entry(obj_priv,
+						    &dev_priv->mm.inactive_list,
+						    list)
+					cnt++;
+				mutex_unlock(&dev->struct_mutex);
+			}
+		}
+		spin_unlock(&shrink_list_lock);
+
+		return (cnt / 100) * sysctl_vfs_cache_pressure;
+	}
+
+	spin_lock(&shrink_list_lock);
+
+	/* first scan for clean buffers */
+	list_for_each_entry_safe(dev_priv, next_dev,
+				 &shrink_list, mm.shrink_list) {
+		struct drm_device *dev = dev_priv->dev;
+
+		if (! mutex_trylock(&dev->struct_mutex))
+			continue;
+
+		spin_unlock(&shrink_list_lock);
+
+		i915_gem_retire_requests(dev);
+
+		list_for_each_entry_safe(obj_priv, next_obj,
+					 &dev_priv->mm.inactive_list,
+					 list) {
+			if (i915_gem_object_is_purgeable(obj_priv)) {
+				struct drm_gem_object *obj = obj_priv->obj;
+				i915_gem_object_unbind(obj);
+				i915_gem_object_truncate(obj);
+
+				if (--nr_to_scan <= 0)
+					break;
+			}
+		}
+
+		spin_lock(&shrink_list_lock);
+		mutex_unlock(&dev->struct_mutex);
+
+		if (nr_to_scan <= 0)
+			break;
+	}
+
+	/* second pass, evict/count anything still on the inactive list */
+	list_for_each_entry_safe(dev_priv, next_dev,
+				 &shrink_list, mm.shrink_list) {
+		struct drm_device *dev = dev_priv->dev;
+
+		if (! mutex_trylock(&dev->struct_mutex))
+			continue;
+
+		spin_unlock(&shrink_list_lock);
+
+		list_for_each_entry_safe(obj_priv, next_obj,
+					 &dev_priv->mm.inactive_list,
+					 list) {
+			if (nr_to_scan > 0) {
+				struct drm_gem_object *obj = obj_priv->obj;
+				i915_gem_object_unbind(obj);
+				if (i915_gem_object_is_purgeable(obj_priv))
+					i915_gem_object_truncate(obj);
+
+				nr_to_scan--;
+			} else
+				cnt++;
+		}
+
+		spin_lock(&shrink_list_lock);
+		mutex_unlock(&dev->struct_mutex);
+
+		would_deadlock = 0;
+	}
+
+	spin_unlock(&shrink_list_lock);
+
+	if (would_deadlock)
+		return -1;
+	else if (cnt > 0)
+		return (cnt / 100) * sysctl_vfs_cache_pressure;
+	else
+		return 0;
+}
+
+static struct shrinker shrinker = {
+	.shrink = i915_gem_shrink,
+	.seeks = DEFAULT_SEEKS,
+};
+
+__init void
+i915_gem_shrinker_init(void)
+{
+	register_shrinker(&shrinker);
+}
+
+__exit void
+i915_gem_shrinker_exit(void)
+{
+	unregister_shrinker(&shrinker);
+}
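As the new code reads, i915_gem_shrink() makes two passes over every registered device's inactive list. The first pass unbinds and truncates only objects that i915_gem_object_is_purgeable() reports as clean (!obj_priv->dirty); the second pass unbinds whatever is still inactive, truncating only the purgeable ones, and counts the remainder toward the value returned to the VM, scaled by sysctl_vfs_cache_pressure. would_deadlock stays set, and -1 is returned, only when no device's struct_mutex could be taken with mutex_trylock(), signalling to the core shrinker logic that no progress was possible.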