author		Chris Wilson <chris@chris-wilson.co.uk>	2009-09-14 11:50:28 -0400
committer	Jesse Barnes <jbarnes@virtuousgeek.org>	2009-09-17 17:43:31 -0400
commit		31169714fc928aed4e945b959dca2bedd259b9c9 (patch)
tree		3cd6aa12bd7da1e4202607597b3baf9a07f48290 /drivers/gpu/drm/i915/i915_gem.c
parent		725ceaa08a98fcdb1ec1c302700e33b629aece4b (diff)
drm/i915: Register a shrinker to free inactive lists under memory pressure
This should help GEM handle memory pressure situations more gracefully.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
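For readers unfamiliar with the shrinker interface of this era (the pre-3.12 single-callback API this patch targets), a minimal sketch of the contract follows. The object cache itself (my_cache_evict, my_cache_count) is hypothetical and only marks where i915's inactive-list walk slots in; register_shrinker/unregister_shrinker, sysctl_vfs_cache_pressure, and DEFAULT_SEEKS are the real kernel symbols.

	#include <linux/mm.h>

	/* Old-style shrinker callback: the VM calls it with nr_to_scan == 0
	 * to ask how many objects are freeable, and with nr_to_scan > 0 to
	 * request that many be freed.  Both cases return a (scaled) count of
	 * remaining freeable objects, or -1 if the shrinker could not take
	 * its locks and trying harder might deadlock.
	 */
	static int my_shrink(int nr_to_scan, gfp_t gfp_mask)
	{
		if (nr_to_scan)
			my_cache_evict(nr_to_scan);	/* hypothetical eviction helper */

		/* Scale the raw count by vfs_cache_pressure, as the dentry and
		 * inode caches do; the default pressure of 100 reports the
		 * count roughly unchanged.
		 */
		return (my_cache_count() / 100) * sysctl_vfs_cache_pressure;
	}

	static struct shrinker my_shrinker = {
		.shrink = my_shrink,
		.seeks	= DEFAULT_SEEKS,	/* relative cost of recreating an object */
	};

	/* register_shrinker(&my_shrinker) at init, unregister_shrinker() at exit */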
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c	144
1 file changed, 144 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 77cc6f57166c..2fff2e0a976e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -53,6 +53,9 @@ static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *o
 			       struct drm_i915_gem_pwrite *args,
 			       struct drm_file *file_priv);
 
+static LIST_HEAD(shrink_list);
+static DEFINE_SPINLOCK(shrink_list_lock);
+
 int i915_gem_do_init(struct drm_device *dev, unsigned long start,
 		     unsigned long end)
 {
@@ -4265,6 +4268,10 @@ i915_gem_load(struct drm_device *dev)
 			  i915_gem_retire_work_handler);
 	dev_priv->mm.next_gem_seqno = 1;
 
+	spin_lock(&shrink_list_lock);
+	list_add(&dev_priv->mm.shrink_list, &shrink_list);
+	spin_unlock(&shrink_list_lock);
+
 	/* Old X drivers will take 0-2 for front, back, depth buffers */
 	dev_priv->fence_reg_start = 3;
 
@@ -4482,3 +4489,140 @@ void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv)
 		list_del_init(i915_file_priv->mm.request_list.next);
 	mutex_unlock(&dev->struct_mutex);
 }
+
+/* Immediately discard the backing storage */
+static void
+i915_gem_object_truncate(struct drm_gem_object *obj)
+{
+	struct inode *inode;
+
+	inode = obj->filp->f_path.dentry->d_inode;
+
+	mutex_lock(&inode->i_mutex);
+	truncate_inode_pages(inode->i_mapping, 0);
+	mutex_unlock(&inode->i_mutex);
+}
+
+static inline int
+i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
+{
+	return !obj_priv->dirty;
+}
+
+static int
+i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
+{
+	drm_i915_private_t *dev_priv, *next_dev;
+	struct drm_i915_gem_object *obj_priv, *next_obj;
+	int cnt = 0;
+	int would_deadlock = 1;
+
+	/* "fast-path" to count number of available objects */
+	if (nr_to_scan == 0) {
+		spin_lock(&shrink_list_lock);
+		list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
+			struct drm_device *dev = dev_priv->dev;
+
+			if (mutex_trylock(&dev->struct_mutex)) {
+				list_for_each_entry(obj_priv,
+						    &dev_priv->mm.inactive_list,
+						    list)
+					cnt++;
+				mutex_unlock(&dev->struct_mutex);
+			}
+		}
+		spin_unlock(&shrink_list_lock);
+
+		return (cnt / 100) * sysctl_vfs_cache_pressure;
+	}
+
+	spin_lock(&shrink_list_lock);
+
+	/* first scan for clean buffers */
+	list_for_each_entry_safe(dev_priv, next_dev,
+				 &shrink_list, mm.shrink_list) {
+		struct drm_device *dev = dev_priv->dev;
+
+		if (! mutex_trylock(&dev->struct_mutex))
+			continue;
+
+		spin_unlock(&shrink_list_lock);
+
+		i915_gem_retire_requests(dev);
+
+		list_for_each_entry_safe(obj_priv, next_obj,
+					 &dev_priv->mm.inactive_list,
+					 list) {
+			if (i915_gem_object_is_purgeable(obj_priv)) {
+				struct drm_gem_object *obj = obj_priv->obj;
+				i915_gem_object_unbind(obj);
+				i915_gem_object_truncate(obj);
+
+				if (--nr_to_scan <= 0)
+					break;
+			}
+		}
+
+		spin_lock(&shrink_list_lock);
+		mutex_unlock(&dev->struct_mutex);
+
+		if (nr_to_scan <= 0)
+			break;
+	}
+
+	/* second pass, evict/count anything still on the inactive list */
+	list_for_each_entry_safe(dev_priv, next_dev,
+				 &shrink_list, mm.shrink_list) {
+		struct drm_device *dev = dev_priv->dev;
+
+		if (! mutex_trylock(&dev->struct_mutex))
+			continue;
+
+		spin_unlock(&shrink_list_lock);
+
+		list_for_each_entry_safe(obj_priv, next_obj,
+					 &dev_priv->mm.inactive_list,
+					 list) {
+			if (nr_to_scan > 0) {
+				struct drm_gem_object *obj = obj_priv->obj;
+				i915_gem_object_unbind(obj);
+				if (i915_gem_object_is_purgeable(obj_priv))
+					i915_gem_object_truncate(obj);
+
+				nr_to_scan--;
+			} else
+				cnt++;
+		}
+
+		spin_lock(&shrink_list_lock);
+		mutex_unlock(&dev->struct_mutex);
+
+		would_deadlock = 0;
+	}
+
+	spin_unlock(&shrink_list_lock);
+
+	if (would_deadlock)
+		return -1;
+	else if (cnt > 0)
+		return (cnt / 100) * sysctl_vfs_cache_pressure;
+	else
+		return 0;
+}
+
+static struct shrinker shrinker = {
+	.shrink = i915_gem_shrink,
+	.seeks = DEFAULT_SEEKS,
+};
+
+__init void
+i915_gem_shrinker_init(void)
+{
+	register_shrinker(&shrinker);
+}
+
+__exit void
+i915_gem_shrinker_exit(void)
+{
+	unregister_shrinker(&shrinker);
+}
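A note on the design, as far as it can be read from the patch itself: every lock is taken with trylock, since shrinkers run during direct reclaim and the allocating thread may already hold a given device's struct_mutex; would_deadlock clears only after at least one struct_mutex was acquired in the second pass, and returning -1 in that case tells the VM to back off rather than retry. The two passes are ordered so that clean (purgeable, i.e. !dirty) buffers have their backing storage truncated first, while dirty buffers are merely unbound from the GTT. The count reported back to the VM is scaled the same way as the dentry and inode caches: with, say, 250 inactive objects and the default vfs_cache_pressure of 100, the callback returns (250 / 100) * 100 = 200 freeable objects, the integer division truncating before the multiply.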